Permalink
Switch branches/tags
v2.2.0-alpha.00000000 v2.1.0-beta.20181015 v2.1.0-beta.20181008 v2.1.0-beta.20181001 v2.1.0-beta.20180924 v2.1.0-beta.20180917 v2.1.0-beta.20180910 v2.1.0-beta.20180904 v2.1.0-beta.20180827 v2.1.0-alpha.20180730 v2.1.0-alpha.20180702 v2.1.0-alpha.20180604 v2.1.0-alpha.20180507 v2.1.0-alpha.20180416 v2.1.0-alpha.00000000 v2.0.6 v2.0.6-rc.1 v2.0.5 v2.0.4 v2.0.3 v2.0.2 v2.0.1 v2.0.0 v2.0-rc.1 v2.0-beta.20180326 v2.0-beta.20180319 v2.0-beta.20180312 v2.0-beta.20180305 v2.0-alpha.20180212 v2.0-alpha.20180129 v2.0-alpha.20180122 v2.0-alpha.20180116 v2.0-alpha.20171218 v2.0-alpha.20171218-plus-left-join-fix v1.2-alpha.20171211 v1.2-alpha.20171204 v1.2-alpha.20171113 v1.2-alpha.20171026 v1.2-alpha.20170901 v1.1.9 v1.1.9-rc.1 v1.1.8 v1.1.7 v1.1.6 v1.1.5 v1.1.4 v1.1.3 v1.1.2 v1.1.1 v1.1.0 v1.1.0-rc.1 v1.1-beta.20170928 v1.1-beta.20170921 v1.1-beta.20170907 v1.1-alpha.20170817 v1.1-alpha.20170810 v1.1-alpha.20170803 v1.1-alpha.20170720 v1.1-alpha.20170713 v1.1-alpha.20170629 v1.1-alpha.20170622 v1.1-alpha.20170608 v1.1-alpha.20170601 v1.0.7 v1.0.6 v1.0.5 v1.0.4 v1.0.3 v1.0.2 v1.0.1 v1.0 v1.0-rc.3 v1.0-rc.2 v1.0-rc.1 v0.1-alpha beta-20170420 beta-20170413 beta-20170406 beta-20170330 beta-20170323 beta-20170309 beta-20170223 beta-20170216 beta-20170209 beta-20170126 beta-20170112 beta-20170105 beta-20161215 beta-20161208 beta-20161201 beta-20161110 beta-20161103 beta-20161027 beta-20161013 beta-20161006 beta-20160929 beta-20160915 beta-20160908 beta-20160829 beta-20160728
Nothing to show
Find file Copy path
Fetching contributors…
Cannot retrieve contributors at this time
executable file 132 lines (115 sloc) 5.57 KB
#!/usr/bin/env python
import errno
import glob
import json
import os
from subprocess import check_call, check_output
from time import sleep
# Before running the script, fill in appropriate values for all the parameters
# above the dashed line.

# To get the names of your kubectl "contexts" for each of your clusters, run:
#   kubectl config get-contexts
#
# Maps each cluster's GCE zone name to the kubectl context used to reach it.
contexts = {
    'us-central1-a': 'gke_cockroach-alex_us-central1-a_dns',
    'us-central1-b': 'gke_cockroach-alex_us-central1-b_dns',
    'us-west1-b': 'gke_cockroach-alex_us-west1-b_dns',
}

# Setting regions is optional, but recommended. If you aren't specifying them,
# remove the three lines below that create entries in the regions map.
regions = {
    'us-central1-a': 'us-central1',
    'us-central1-b': 'us-central1',
    'us-west1-b': 'us-west1',
}

# Where generated certificates, the CA key, and generated YAML files are
# written on the local machine.
certs_dir = './certs'
ca_key_dir = './my-safe-directory'
generated_files_dir = './generated'

# Path to the cockroach binary on your local machine that you want to use
# generate certificates. Defaults to trying to find cockroach in your PATH.
# (Restored from a leftover debug value '../../../cockroach' that was marked
# "TODO: CHANGE BACK".)
cockroach_path = 'cockroach'

# ------------------------------------------------------------------------------
# First, set up the necessary directories and certificates.
def _ensure_dir(path):
    # Create `path` if it doesn't already exist. Unlike a bare
    # `except OSError: pass`, this re-raises real failures (e.g. permission
    # errors) instead of silently swallowing them.
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

for d in (certs_dir, ca_key_dir, generated_files_dir):
    _ensure_dir(d)

# Create the CA cert/key pair and a root client cert signed by it; these are
# shared across all clusters.
check_call([cockroach_path, 'cert', 'create-ca',
            '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key'])
check_call([cockroach_path, 'cert', 'create-client', 'root',
            '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key'])
# For each cluster, create secrets containing the node and client certificates.
# Also create an internal load balancer to each cluster's DNS pods.
for zone, context in contexts.items():
    check_call(['kubectl', 'create', 'namespace', zone, '--context', context])
    check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.client.root',
                '--namespace', zone, '--from-file', certs_dir, '--context', context])
    # Create a node cert listing every DNS name / address a node in this zone
    # may be reached by. BUGFIX: the original was missing a comma after
    # 'cockroachdb-public.default', so Python's implicit string-literal
    # concatenation merged it with the next name into one bogus hostname and
    # left both intended names out of the certificate.
    check_call([cockroach_path, 'cert', 'create-node',
                '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key',
                'localhost', '127.0.0.1',
                'cockroachdb-public',
                'cockroachdb-public.default',
                'cockroachdb-public.'+zone,
                'cockroachdb-public.%s.svc.cluster.local' % (zone),
                '*.cockroachdb',
                '*.cockroachdb.'+zone,
                '*.cockroachdb.%s.svc.cluster.local' % (zone)])
    check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.node',
                '--namespace', zone, '--from-file', certs_dir, '--context', context])
    # Remove the node cert/key so the next zone gets its own freshly generated
    # pair. Use glob + os.remove instead of `rm` via shell=True.
    for leftover in glob.glob('%s/node.*' % (certs_dir)):
        os.remove(leftover)
    check_call(['kubectl', 'apply', '-f', 'dns-lb.yaml', '--context', context])
# Set up each cluster to forward DNS requests for zone-scoped namespaces to the
# relevant cluster's DNS server, using internal load balancers in order to
# create a static IP for each cluster's DNS endpoint.
dns_ips = dict()
for zone, context in contexts.items():
    external_ip = ''
    while True:
        # The go-template prints only the load balancer's ingress IP(s); the
        # output is empty until the cloud provider finishes provisioning.
        # Decode so we get text (not bytes) on Python 3; strip defensively.
        external_ip = check_output(
            ['kubectl', 'get', 'svc', 'kube-dns-lb',
             '--namespace', 'kube-system', '--context', context,
             '--template',
             '{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}']
        ).decode('utf-8').strip()
        if external_ip:
            break
        print('Waiting for DNS load balancer IP in %s...' % (zone))
        sleep(10)
    print('DNS endpoint for zone %s: %s' % (zone, external_ip))
    dns_ips[zone] = external_ip
# Update each cluster's DNS configuration with an appropriate configmap. Note
# that we have to leave the local cluster out of its own configmap to avoid
# infinite recursion through the load balancer IP. We then have to delete the
# existing DNS pods in order for the new configuration to take effect.
for zone, context in contexts.items():
    # Stub domains for every *other* zone's namespace, each pointing at that
    # zone's DNS load balancer IP.
    remote_dns_ips = {
        other + '.svc.cluster.local': [addr]
        for other, addr in dns_ips.items()
        if other != zone
    }
    config_filename = '%s/dns-configmap-%s.yaml' % (generated_files_dir, zone)
    with open(config_filename, 'w') as out:
        out.write("""\
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
data:
  stubDomains: |
    %s
""" % (json.dumps(remote_dns_ips)))
    check_call(['kubectl', 'apply', '-f', config_filename,
                '--namespace', 'kube-system', '--context', context])
    check_call(['kubectl', 'delete', 'pods', '-l', 'k8s-app=kube-dns',
                '--namespace', 'kube-system', '--context', context])
# Generate the join string to be used: the first three pods of each zone's
# StatefulSet, comma-separated.
join_addrs = [
    'cockroachdb-%d.cockroachdb.%s' % (idx, zone)
    for zone in contexts
    for idx in range(3)
]
join_str = ','.join(join_addrs)
# Create the cockroach resources in each cluster.
for zone, context in contexts.items():
    # Tag nodes with their region (when known) and zone so CockroachDB can
    # make locality-aware replica placement decisions.
    if zone in regions:
        locality = 'region=%s,zone=%s' % (regions[zone], zone)
    else:
        locality = 'zone=%s' % (zone)
    yaml_file = '%s/cockroachdb-statefulset-%s.yaml' % (generated_files_dir, zone)
    # Substitute the template placeholders in pure Python instead of shelling
    # out to `sed`: no external-tool dependency, and no breakage if the
    # substituted values ever contain sed-special characters such as '/'.
    with open('cockroachdb-statefulset-secure.yaml') as template:
        manifest = template.read()
    manifest = manifest.replace('JOINLIST', join_str)
    manifest = manifest.replace('LOCALITYLIST', locality)
    with open(yaml_file, 'w') as out:
        out.write(manifest)
    check_call(['kubectl', 'apply', '-f', yaml_file,
                '--namespace', zone, '--context', context])
# Finally, initialize the cluster.
# Use print() (valid in both Python 2 and 3) rather than the Python-2-only
# print statement.
print('Sleeping 30 seconds before attempting to initialize cluster to give time for volumes to be created and pods started.')
sleep(30)
for zone, context in contexts.items():
    check_call(['kubectl', 'create', '-f', 'cluster-init-secure.yaml',
                '--namespace', zone, '--context', context])
    # We only need run the init command in one zone given that all the zones are
    # joined together as one cluster.
    break