Commit

Retry helm installs
c-w committed Feb 10, 2018
1 parent bddc46a commit 0011287
Showing 2 changed files with 44 additions and 30 deletions.
project-fortis-pipeline/ops/install-cassandra.sh (24 changes: 15 additions, 9 deletions)
@@ -8,15 +8,21 @@ cd charts || exit -2
 readonly cluster_name="FORTIS_CASSANDRA"
 readonly storageClass="fast"
 
-# install cassandra
-helm install \
-  --set replicaCount="${k8cassandra_node_count}" \
-  --set VmInstanceType="${agent_vm_size}" \
-  --set cassandra.ClusterName="${cluster_name}" \
-  --set persistence.storageClass="${storageClass}" \
-  --namespace cassandra \
-  --name cassandra-cluster \
-  ./cassandra
+install_cassandra() {
+  helm install \
+    --set replicaCount="${k8cassandra_node_count}" \
+    --set VmInstanceType="${agent_vm_size}" \
+    --set cassandra.ClusterName="${cluster_name}" \
+    --set persistence.storageClass="${storageClass}" \
+    --namespace cassandra \
+    --name cassandra-cluster \
+    ./cassandra
+}
+
+while ! install_cassandra; do
+  echo "Failed to set up cassandra helm chart, retrying"
+  sleep 30s
+done
 
 # wait for all cassandra nodes to be ready
 while [ -z "$(kubectl --namespace=cassandra get svc cassandra-cluster-cassan-ext -o jsonpath='{..ip}')" ]; do
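The new loop retries helm install every 30 seconds until it succeeds, so a transient failure no longer aborts the provisioning script. As an illustrative sketch only, not part of this commit, the same idiom could be bounded with a hypothetical retry helper so that a persistent failure eventually surfaces instead of looping forever:

# Illustrative sketch, not part of this commit: a bounded variant of the
# retry loop above. The retry helper and the attempt limit are assumptions.
retry() {
  local max_attempts="$1"; shift
  local attempt=1
  until "$@"; do
    if [ "${attempt}" -ge "${max_attempts}" ]; then
      echo "Giving up after ${attempt} attempts: $*" >&2
      return 1
    fi
    echo "Attempt ${attempt} failed, retrying in 30s: $*" >&2
    attempt=$((attempt + 1))
    sleep 30s
  done
}

# Hypothetical usage: retry 10 install_cassandra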
project-fortis-pipeline/ops/install-spark.sh (50 changes: 29 additions, 21 deletions)
@@ -76,27 +76,35 @@ kubectl create configmap "${spark_config_map_name}" \
   --from-literal=PUBLISH_EVENTS_EVENTHUB_CONNECTION_STRING="${eh_conn_str}" \
   --from-literal=PUBLISH_EVENTS_EVENTHUB_PATH="${eh_path}" \
   --from-literal=PUBLISH_EVENTS_EVENTHUB_PARTITION="${eh_consumer_group}"
-helm install \
-  --set Worker.Replicas="${k8spark_worker_count}" \
-  --set Master.ImageTag="2.2" \
-  --set Worker.ImageTag="2.2" \
-  --set Worker.ConfigMapName="${spark_config_map_name}" \
-  --set Master.ConfigMapName="${spark_config_map_name}" \
-  --set Master.SparkSubmitCommand="${spark_command}" \
-  --set Worker.VmInstanceType="${agent_vm_size}" \
-  --set Worker.Resources.Requests.Cpu="1" \
-  --set Worker.Resources.Requests.Memory="10Gi" \
-  --set Worker.Resources.Limits.Cpu="2.8" \
-  --set Worker.Resources.Limits.Memory="20Gi" \
-  --set Master.Resources.Requests.Cpu="1" \
-  --set Master.Resources.Requests.Memory="3Gi" \
-  --set Master.Resources.Limits.Cpu="2" \
-  --set Master.Resources.Limits.Memory="5Gi" \
-  --set Worker.Environment[0].name="SPARK_WORKER_MEMORY",Worker.Environment[0].value="20g" \
-  --set Worker.Environment[1].name="SPARK_WORKER_OPTS",Worker.Environment[1].value="-Dspark.worker.cleanup.enabled=true -Dspark.worker.cleanup.interval=1800 -Dspark.worker.cleanup.appDataTtl=3600" \
-  --namespace spark \
-  --name spark-cluster \
-  ./spark
+
+install_spark() {
+  helm install \
+    --set Worker.Replicas="${k8spark_worker_count}" \
+    --set Master.ImageTag="2.2" \
+    --set Worker.ImageTag="2.2" \
+    --set Worker.ConfigMapName="${spark_config_map_name}" \
+    --set Master.ConfigMapName="${spark_config_map_name}" \
+    --set Master.SparkSubmitCommand="${spark_command}" \
+    --set Worker.VmInstanceType="${agent_vm_size}" \
+    --set Worker.Resources.Requests.Cpu="1" \
+    --set Worker.Resources.Requests.Memory="10Gi" \
+    --set Worker.Resources.Limits.Cpu="2.8" \
+    --set Worker.Resources.Limits.Memory="20Gi" \
+    --set Master.Resources.Requests.Cpu="1" \
+    --set Master.Resources.Requests.Memory="3Gi" \
+    --set Master.Resources.Limits.Cpu="2" \
+    --set Master.Resources.Limits.Memory="5Gi" \
+    --set Worker.Environment[0].name="SPARK_WORKER_MEMORY",Worker.Environment[0].value="20g" \
+    --set Worker.Environment[1].name="SPARK_WORKER_OPTS",Worker.Environment[1].value="-Dspark.worker.cleanup.enabled=true -Dspark.worker.cleanup.interval=1800 -Dspark.worker.cleanup.appDataTtl=3600" \
+    --namespace spark \
+    --name spark-cluster \
+    ./spark
+}
+
+while ! install_spark; do
+  echo "Failed to set up spark helm chart, retrying"
+  sleep 30s
+done
 
 # cleanup
 cd ..
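Both retry loops return only once helm install reports success. As a follow-up sketch, again not part of the commit and assuming the release names and namespaces used above, the resulting releases can be sanity-checked after the script finishes:

# Illustrative sketch, not part of this commit: sanity checks after the retry
# loops succeed, assuming the release names and namespaces used above.
helm status cassandra-cluster        # expect STATUS: DEPLOYED
helm status spark-cluster
kubectl get pods --namespace cassandra
kubectl get pods --namespace spark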
