-
Notifications
You must be signed in to change notification settings - Fork 7
/
production.sh
executable file
·214 lines (161 loc) · 7.36 KB
/
production.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
#!/usr/bin/env bash
# Production deployment runbook: multi-cluster GKE + Helm v2 + Istio.
# Intended to be run step by step; deliberately no `set -e` so that
# re-running idempotent steps ("already exists" errors) does not abort.
#
# Step 1: authenticate, pin the project, and fetch the tooling
# (Helm v2.16.1 and kubectx) into the current working directory.
gcloud auth login
PROJECT=ink-server
gcloud config set project "$PROJECT"
WORKDIR=$(pwd)
HELM_VERSION=v2.16.1
HELM_PATH="$WORKDIR/helm-$HELM_VERSION"
wget "https://storage.googleapis.com/kubernetes-helm/helm-$HELM_VERSION-linux-amd64.tar.gz"
tar -xvzf "helm-$HELM_VERSION-linux-amd64.tar.gz"
# The tarball unpacks to ./linux-amd64; move it to a version-stamped path.
mv linux-amd64 "$HELM_PATH"
git clone https://github.com/ahmetb/kubectx "$WORKDIR/kubectx"
# Make kubectx/kubens available for the rest of the runbook.
export PATH="$PATH:$WORKDIR/kubectx"
# Create a cluster in us-east and get its credentials
# --async returns immediately; the cluster is NOT ready when the command
# exits, so everything after this must wait for provisioning to finish.
gcloud container clusters create \
--zone us-east4-a \
--num-nodes 1 \
--machine-type n1-standard-4 \
--async \
usa
# Create a cluster in eu-west and get its credentials
gcloud container clusters create \
--zone europe-west6-a \
--num-nodes 1 \
--machine-type n1-standard-4 \
--async \
europe
# sleep 240
# Check that both clusters reached RUNNING before continuing.
gcloud container clusters list
# Or get credentials of already created clusters
# KUBECONFIG=clusters.yaml gcloud container clusters \
# get-credentials usa --zone=us-east4-a
# KUBECONFIG=clusters.yaml gcloud container clusters \
# get-credentials europe --zone=europe-west6-a
# NOTE(review): the short context names "europe"/"usa" only exist after the
# `kubectx` renames further down; on a first run these use-context calls
# will fail until credentials are fetched and contexts renamed. This path
# presumably targets clusters set up on a previous run — confirm ordering.
kubectl config use-context europe
# Export each context into its own kubeconfig, then merge the two files
# into a single clusters.yaml (referenced by the commented lines above).
kubectl config view --minify --flatten > europe.yaml
kubectl config use-context usa
kubectl config view --minify --flatten > usa.yaml
KUBECONFIG=europe.yaml:usa.yaml kubectl config view --flatten > clusters.yaml
# Connect to both clusters: pull their credentials into the active
# kubeconfig, then give the contexts short names.
# Declaration is separated from the command substitution so a gcloud
# failure is not masked by `export` (SC2155).
PROJECT_ID=$(gcloud info --format='value(config.project)')
export PROJECT_ID
gcloud container clusters get-credentials usa --zone us-east4-a --project "$PROJECT_ID"
gcloud container clusters get-credentials europe --zone europe-west6-a --project "$PROJECT_ID"
# Renaming contexts for convenience (Optional); GKE contexts are created
# as gke_<project>_<zone>_<cluster>.
kubectx usa="gke_${PROJECT_ID}_us-east4-a_usa"
kubectx europe="gke_${PROJECT_ID}_europe-west6-a_europe"
#######################################
# Install Tiller (the Helm v2 server component) with cluster-admin rights
# on one cluster. Extracted to remove the verbatim europe/usa duplication.
# Globals:   HELM_PATH (read)
# Arguments: $1 - kubectx context name to target
# Outputs:   helm/kubectl status to stdout
#######################################
setup_tiller() {
  local ctx=$1
  kubectx "$ctx"
  kubectl create serviceaccount tiller --namespace kube-system
  kubectl create clusterrolebinding tiller-admin-binding \
    --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
  "${HELM_PATH}/helm" init --service-account=tiller
  "${HELM_PATH}/helm" repo update
  # `helm version` reports both client and server; the server line only
  # appears once the Tiller pod is up.
  "${HELM_PATH}/helm" version
}
## Setup europe
setup_tiller europe
## Setup usa
setup_tiller usa
## Wait for versions to be shown
## Install the helm chart on the europe and usa clusters.
# Loop replaces the duplicated per-cluster command pair; order (europe
# first, then usa) matches the original sequence.
for ctx in europe usa; do
  kubectx "$ctx"
  "${HELM_PATH}/helm" install --name ink ../ink
done
## Update charts if you want to update
# No need if they have been just created
# for ctx in europe usa; do
#   kubectx "$ctx"
#   "${HELM_PATH}/helm" upgrade ink ../ink
# done
# Reserve global ip (static address for the external ingress frontend)
gcloud compute addresses create --global production-ip
#Or list if you already have one
gcloud compute addresses list
# Install Istio per the instructions here https://istio.io/docs/tasks/traffic-management/ingress/ingress-certmgr/
#   sds.enabled      - gateway fetches TLS certs via Secret Discovery Service
#   k8sIngress.*     - Istio manages the autogenerated Kubernetes ingress
#                      gateway ("ingressgateway") with HTTPS enabled
# (flag semantics per the Istio task linked above)
istioctl manifest apply \
--set values.gateways.istio-ingressgateway.sds.enabled=true \
--set values.global.k8sIngress.enabled=true \
--set values.global.k8sIngress.enableHttps=true \
--set values.global.k8sIngress.gatewayName=ingressgateway
# Auto-inject Istio sidecars into pods created in the default namespace.
kubectl label namespace default istio-injection=enabled --overwrite
#If you are using istio, you have to do these steps again for europe and usa:
# delete and reinstall the chart on each cluster so the pods are recreated
# with the Istio sidecar injected. Loop replaces the duplicated per-cluster
# commands; order (europe first, then usa) matches the original.
for ctx in europe usa; do
  kubectx "$ctx"
  "${HELM_PATH}/helm" del --purge ink
  "${HELM_PATH}/helm" install --name ink ../ink
done
# Wait until all Istio pods are ready...
kubectl get pods -n istio-system
# Wait a few minutes for the LoadBalancer which was created by the
# Istio install to be ready and show 3/3 pods.
# Create a DNS A record that points to the load balancer external IP - like host.example.com
# Set the $INGRESS_DOMAIN environment variable equal to the hostname of your cluster you
# created the DNS A record earlier.
# NOTE(review): INGRESS_DOMAIN is not referenced by any later command in
# view — presumably consumed by template.yaml below; verify.
INGRESS_DOMAIN=ink-remix.blockchain-it.hr
# Install Cert-Manager 0.13.0
kubectl create namespace cert-manager
# NOTE(review): --validate=false is presumably needed because older kubectl
# client-side validation rejects the cert-manager CRD manifest — confirm
# against the cert-manager install docs.
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.13.0/cert-manager.yaml
# Wait for all 3 cert-manager pods to be running
kubectl get pods -n cert-manager
# Patch the Istio Gateway per the instructions.
# Rewrites the TLS config of server index 1 (the HTTPS server) on the
# autogenerated ingress gateway to use the "ingress-cert" secret via SDS.
kubectl -n istio-system \
patch gateway istio-autogenerated-k8s-ingress --type=json \
-p='[{"op": "replace", "path": "/spec/servers/1/tls", "value": {"credentialName": "ingress-cert", "mode": "SIMPLE", "privateKey": "sds", "serverCertificate": "sds"}}]'
# Now RESTART ISTIO and CERT-MANAGER or else they won't be able to resolve your new DNS entry.
# First, restart Istio
kubectl delete pods --all -n istio-system
# Wait and confirm that Istio is all up and running
kubectl get pods -n istio-system
# Restart Cert Manager
kubectl delete pods --all -n cert-manager
# Wait until Cert Manager is all back up and running again
kubectl get pods -n cert-manager
# Apply the Certificate/Issuer resources.
# NOTE(review): template.yaml is not in view — confirm it defines the
# "ingress-cert" Certificate inspected below.
kubectl apply -f template.yaml
# Wait for the certificate to be ready:
kubectl -n istio-system describe certificate ingress-cert
# Wait for the Certificate Request (the cert id comes from the output of the above command)
# NOTE: the numeric suffixes below are sample ids from one run — substitute
# the ids printed by your own `describe` output at each step.
kubectl -n istio-system describe certificaterequest ingress-cert-1972484071
# Wait for the Certificate Order (The order id comes from output of the above command)
kubectl -n istio-system describe order ingress-cert-1972484071-2644765445
# Wait for the challenge (the challenge id comes from the output of the above command)
kubectl -n istio-system describe challenge ingress-cert-4051514424-3229718444-3565931193
## Istio mesh (optional appendix — all commands below are intentionally
## commented out; uncomment to run the multicluster-mesh variant).
# All configuration is done again, but if you already used the previous steps feel free to skip
# export WORKDIR=$(pwd)
# mkdir -p ${WORKDIR}
# cd ${WORKDIR}
# export MESH_ID=inkmesh
# export ORG_NAME=ink-remix
# wget https://raw.githubusercontent.com/istio/istio/release-1.4/samples/multicluster/setup-mesh.sh
# chmod +x setup-mesh.sh
# ./setup-mesh.sh prep-mesh
# Add contexts to topology.yaml or rename topology.example.yaml to topology.yaml
# ./setup-mesh.sh apply
# Get ingress IP of istio
# kubectl get svc istio-ingressgateway -n istio-system
# Add istio-injection to both default and ink namespaces
# kubectl label namespace cert-manager istio-injection=enabled
# kubectl label namespace default istio-injection=enabled
# kubectl label namespace ink istio-injection=enabled
# This way you deploy the gateway for the helm chart
# ./deploy-to-multicluster.sh install
# If istio has an external ip
# export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
# export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
# export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
# export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
# echo $GATEWAY_URL
# Go to the link and see that the app is working
# Delete all resources
# ./deploy-to-multicluster.sh uninstall
# ./setup-mesh.sh teardown # Clear istio and all its resources
# NOTE(review): the line below passes both -n istio-system and
# --namespace cert-manager — only one namespace can apply; pick the
# namespace that actually holds the ingress-cert secret before running.
# kubectl -n istio-system delete secret ingress-cert --namespace cert-manager
# # Debug
# kubectl get -o yaml \
#   --all-namespaces \
#   issuer,clusterissuer,certificates,orders,challenges