Fix indentation and line continuations in hive test scripts
jirassimok authored and losipiuk committed Sep 17, 2020
1 parent 432bf86 commit a381973
Showing 4 changed files with 133 additions and 133 deletions.
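
Note (not part of the commit): the "line continuations" half of the change removes trailing backslashes after `&&`, visible in the check_hadoop hunk below. A minimal standalone bash sketch of why those backslashes are redundant (the commands here are placeholders, not taken from the scripts):

#!/usr/bin/env bash
set -euo pipefail

# A command that ends in `&&` is not complete, so bash keeps reading on the
# next line; an extra backslash there changes nothing.

# With a redundant backslash (old style):
true && \
    echo "runs with backslash"

# Without it (new style), the behavior is identical:
true &&
    echo "runs without backslash"
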
136 changes: 68 additions & 68 deletions presto-hive-hadoop2/bin/common.sh
@@ -1,85 +1,85 @@
#!/usr/bin/env bash

function retry() {
-local END
-local EXIT_CODE
+local END
+local EXIT_CODE

-END=$(($(date +%s) + 600))
+END=$(($(date +%s) + 600))

-while (( $(date +%s) < $END )); do
-set +e
-"$@"
-EXIT_CODE=$?
-set -e
+while (( $(date +%s) < $END )); do
+set +e
+"$@"
+EXIT_CODE=$?
+set -e

-if [[ ${EXIT_CODE} == 0 ]]; then
-break
-fi
-sleep 5
-done
+if [[ ${EXIT_CODE} == 0 ]]; then
+break
+fi
+sleep 5
+done

-return ${EXIT_CODE}
+return ${EXIT_CODE}
}

function hadoop_master_container(){
-docker-compose -f "${DOCKER_COMPOSE_LOCATION}" ps -q hadoop-master | grep .
+docker-compose -f "${DOCKER_COMPOSE_LOCATION}" ps -q hadoop-master | grep .
}

function hadoop_master_ip() {
-HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
-docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $HADOOP_MASTER_CONTAINER
+HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
+docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $HADOOP_MASTER_CONTAINER
}

function check_hadoop() {
-HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
-docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl status hive-server2 | grep -i running &> /dev/null && \
-docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl status hive-metastore | grep -i running &> /dev/null && \
-docker exec ${HADOOP_MASTER_CONTAINER} netstat -lpn | grep -i 0.0.0.0:10000 &> /dev/null &&
-docker exec ${HADOOP_MASTER_CONTAINER} netstat -lpn | grep -i 0.0.0.0:9083 &> /dev/null
+HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
+docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl status hive-server2 | grep -i running &> /dev/null &&
+docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl status hive-metastore | grep -i running &> /dev/null &&
+docker exec ${HADOOP_MASTER_CONTAINER} netstat -lpn | grep -i 0.0.0.0:10000 &> /dev/null &&
+docker exec ${HADOOP_MASTER_CONTAINER} netstat -lpn | grep -i 0.0.0.0:9083 &> /dev/null
}

function exec_in_hadoop_master_container() {
-HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
-docker exec ${HADOOP_MASTER_CONTAINER} "$@"
+HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
+docker exec ${HADOOP_MASTER_CONTAINER} "$@"
}

function stop_unnecessary_hadoop_services() {
-HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
-docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl status
-docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl stop yarn-resourcemanager
-docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl stop yarn-nodemanager
+HADOOP_MASTER_CONTAINER=$(hadoop_master_container)
+docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl status
+docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl stop yarn-resourcemanager
+docker exec ${HADOOP_MASTER_CONTAINER} supervisorctl stop yarn-nodemanager
}

# Expands docker compose file paths files into the format "-f $1 -f $2 ...."
# Arguments:
# $1, $2, ...: A list of docker-compose files used to start/stop containers
function expand_compose_args() {
-local files=( "${@}" )
-local compose_args=""
-for file in ${files[@]}; do
-compose_args+=" -f ${file}"
-done
-echo "${compose_args}"
+local files=( "${@}" )
+local compose_args=""
+for file in ${files[@]}; do
+compose_args+=" -f ${file}"
+done
+echo "${compose_args}"
}

function cleanup_docker_containers() {
-local compose_args="$(expand_compose_args "$@")"
-# stop containers started with "up"
-docker-compose ${compose_args} down --remove-orphans
+local compose_args="$(expand_compose_args "$@")"
+# stop containers started with "up"
+docker-compose ${compose_args} down --remove-orphans

-# docker logs processes are being terminated as soon as docker container are stopped
-# wait for docker logs termination
-wait
+# docker logs processes are being terminated as soon as docker container are stopped
+# wait for docker logs termination
+wait
}

function cleanup_hadoop_docker_containers() {
-cleanup_docker_containers "${DOCKER_COMPOSE_LOCATION}"
+cleanup_docker_containers "${DOCKER_COMPOSE_LOCATION}"
}

function termination_handler(){
-set +e
-cleanup_docker_containers "$@"
-exit 130
+set +e
+cleanup_docker_containers "$@"
+exit 130
}

SCRIPT_DIR="${BASH_SOURCE%/*}"
@@ -95,42 +95,42 @@ docker version
# extract proxy IP
if [ -n "${DOCKER_MACHINE_NAME:-}" ]
then
-PROXY=`docker-machine ip`
+PROXY=`docker-machine ip`
else
-PROXY=127.0.0.1
+PROXY=127.0.0.1
fi

# Starts containers based on multiple docker compose locations
# Arguments:
# $1, $2, ...: A list of docker-compose files used to start containers
function start_docker_containers() {
-local compose_args="$(expand_compose_args $@)"
-# Purposefully don't surround ${compose_args} with quotes so that docker-compose infers multiple arguments
-# stop already running containers
-docker-compose ${compose_args} down || true
-
-# catch terminate signals
-# trap arguments are not expanded until the trap is called, so they must be in a global variable
-TRAP_ARGS="$@"
-trap 'termination_handler $TRAP_ARGS' INT TERM
-
-# pull docker images
-if [[ "${CONTINUOUS_INTEGRATION:-false}" == 'true' ]]; then
-docker-compose ${compose_args} pull --quiet
-fi
-
-# start containers
-docker-compose ${compose_args} up -d
+local compose_args="$(expand_compose_args $@)"
+# Purposefully don't surround ${compose_args} with quotes so that docker-compose infers multiple arguments
+# stop already running containers
+docker-compose ${compose_args} down || true
+
+# catch terminate signals
+# trap arguments are not expanded until the trap is called, so they must be in a global variable
+TRAP_ARGS="$@"
+trap 'termination_handler $TRAP_ARGS' INT TERM
+
+# pull docker images
+if [[ "${CONTINUOUS_INTEGRATION:-false}" == 'true' ]]; then
+docker-compose ${compose_args} pull --quiet
+fi
+
+# start containers
+docker-compose ${compose_args} up -d
}

function start_hadoop_docker_containers() {
-start_docker_containers "${DOCKER_COMPOSE_LOCATION}"
+start_docker_containers "${DOCKER_COMPOSE_LOCATION}"

-# start docker logs for hadoop container
-docker-compose -f "${DOCKER_COMPOSE_LOCATION}" logs --no-color hadoop-master &
+# start docker logs for hadoop container
+docker-compose -f "${DOCKER_COMPOSE_LOCATION}" logs --no-color hadoop-master &

-# wait until hadoop processes is started
-retry check_hadoop
+# wait until hadoop processes is started
+retry check_hadoop
}

function get_hive_major_version() {
88 changes: 44 additions & 44 deletions presto-hive-hadoop2/bin/run_hive_alluxio_tests.sh
@@ -10,65 +10,65 @@ export ALLUXIO_IMAGE_TAG="2.1.2"
ALLUXIO_DOCKER_COMPOSE_LOCATION="${INTEGRATION_TESTS_ROOT}/conf/alluxio-docker.yml"

function check_alluxio() {
-run_in_alluxio alluxio fsadmin report
+run_in_alluxio alluxio fsadmin report
}

function run_in_alluxio() {
docker exec -e ALLUXIO_JAVA_OPTS=" -Dalluxio.master.hostname=localhost" \
"$(alluxio_master_container)" $@
"$(alluxio_master_container)" $@
}

# Arguments:
# $1: container name
function get_alluxio_container() {
-docker-compose -f "${ALLUXIO_DOCKER_COMPOSE_LOCATION}" ps -q "$1" | grep .
+docker-compose -f "${ALLUXIO_DOCKER_COMPOSE_LOCATION}" ps -q "$1" | grep .
}

function alluxio_master_container() {
-get_alluxio_container alluxio-master
+get_alluxio_container alluxio-master
}

function main () {
-cleanup_docker_containers "${DOCKER_COMPOSE_LOCATION}" "${ALLUXIO_DOCKER_COMPOSE_LOCATION}"
-start_docker_containers "${DOCKER_COMPOSE_LOCATION}" "${ALLUXIO_DOCKER_COMPOSE_LOCATION}"
-retry check_hadoop
-retry check_alluxio & # data can be generated while we wait for alluxio to start
-
-# obtain Hive version
-TESTS_HIVE_VERSION_MAJOR=$(get_hive_major_version)
-
-# generate test data
-exec_in_hadoop_master_container sudo -Eu hdfs hdfs dfs -mkdir /alluxio
-exec_in_hadoop_master_container sudo -Eu hdfs hdfs dfs -chmod 777 /alluxio
-exec_in_hadoop_master_container sudo -Eu hive beeline -u jdbc:hive2://localhost:10000/default -n hive -f /docker/sql/create-test.sql
-exec_in_hadoop_master_container sudo -Eu hive beeline -u jdbc:hive2://localhost:10000/default -n hive -f "/docker/sql/create-test-hive-${TESTS_HIVE_VERSION_MAJOR}.sql"
-
-# Alluxio currently doesn't support views
-exec_in_hadoop_master_container sudo -Eu hive beeline -u jdbc:hive2://localhost:10000/default -n hive -e 'DROP VIEW presto_test_view;'
-
-stop_unnecessary_hadoop_services
-
-wait # make sure alluxio has started
-
-run_in_alluxio alluxio table attachdb hive thrift://hadoop-master:9083 default
-run_in_alluxio alluxio table ls default
-
-# run product tests
-pushd ${PROJECT_ROOT}
-set +e
-./mvnw -B -pl presto-hive-hadoop2 test -P test-hive-hadoop2-alluxio \
--Dhive.hadoop2.alluxio.host=localhost \
--Dhive.hadoop2.alluxio.port=19998 \
--Dhive.hadoop2.hiveVersionMajor="${TESTS_HIVE_VERSION_MAJOR}" \
--Dhive.hadoop2.timeZone=Asia/Kathmandu \
--DHADOOP_USER_NAME=hive
-EXIT_CODE=$?
-set -e
-popd
-
-cleanup_docker_containers "${DOCKER_COMPOSE_LOCATION}" "${ALLUXIO_DOCKER_COMPOSE_LOCATION}"
-
-exit ${EXIT_CODE}
+cleanup_docker_containers "${DOCKER_COMPOSE_LOCATION}" "${ALLUXIO_DOCKER_COMPOSE_LOCATION}"
+start_docker_containers "${DOCKER_COMPOSE_LOCATION}" "${ALLUXIO_DOCKER_COMPOSE_LOCATION}"
+retry check_hadoop
+retry check_alluxio & # data can be generated while we wait for alluxio to start
+
+# obtain Hive version
+TESTS_HIVE_VERSION_MAJOR=$(get_hive_major_version)
+
+# generate test data
+exec_in_hadoop_master_container sudo -Eu hdfs hdfs dfs -mkdir /alluxio
+exec_in_hadoop_master_container sudo -Eu hdfs hdfs dfs -chmod 777 /alluxio
+exec_in_hadoop_master_container sudo -Eu hive beeline -u jdbc:hive2://localhost:10000/default -n hive -f /docker/sql/create-test.sql
+exec_in_hadoop_master_container sudo -Eu hive beeline -u jdbc:hive2://localhost:10000/default -n hive -f "/docker/sql/create-test-hive-${TESTS_HIVE_VERSION_MAJOR}.sql"
+
+# Alluxio currently doesn't support views
+exec_in_hadoop_master_container sudo -Eu hive beeline -u jdbc:hive2://localhost:10000/default -n hive -e 'DROP VIEW presto_test_view;'
+
+stop_unnecessary_hadoop_services
+
+wait # make sure alluxio has started
+
+run_in_alluxio alluxio table attachdb hive thrift://hadoop-master:9083 default
+run_in_alluxio alluxio table ls default
+
+# run product tests
+pushd ${PROJECT_ROOT}
+set +e
+./mvnw -B -pl presto-hive-hadoop2 test -P test-hive-hadoop2-alluxio \
+-Dhive.hadoop2.alluxio.host=localhost \
+-Dhive.hadoop2.alluxio.port=19998 \
+-Dhive.hadoop2.hiveVersionMajor="${TESTS_HIVE_VERSION_MAJOR}" \
+-Dhive.hadoop2.timeZone=Asia/Kathmandu \
+-DHADOOP_USER_NAME=hive
+EXIT_CODE=$?
+set -e
+popd
+
+cleanup_docker_containers "${DOCKER_COMPOSE_LOCATION}" "${ALLUXIO_DOCKER_COMPOSE_LOCATION}"
+
+exit ${EXIT_CODE}
}

main
24 changes: 12 additions & 12 deletions presto-hive-hadoop2/bin/run_hive_s3_tests.sh
@@ -13,10 +13,10 @@ test_directory="$(date '+%Y%m%d-%H%M%S')-$(uuidgen | sha1sum | cut -b 1-6)"
# TODO replace core-site.xml.s3-template with apply-site-xml-override.sh
exec_in_hadoop_master_container cp /docker/files/core-site.xml.s3-template /etc/hadoop/conf/core-site.xml
exec_in_hadoop_master_container sed -i \
-e "s|%AWS_ACCESS_KEY%|${AWS_ACCESS_KEY_ID}|g" \
-e "s|%AWS_SECRET_KEY%|${AWS_SECRET_ACCESS_KEY}|g" \
-e "s|%S3_BUCKET_ENDPOINT%|${S3_BUCKET_ENDPOINT}|g" \
/etc/hadoop/conf/core-site.xml
-e "s|%AWS_ACCESS_KEY%|${AWS_ACCESS_KEY_ID}|g" \
-e "s|%AWS_SECRET_KEY%|${AWS_SECRET_ACCESS_KEY}|g" \
-e "s|%S3_BUCKET_ENDPOINT%|${S3_BUCKET_ENDPOINT}|g" \
/etc/hadoop/conf/core-site.xml

# create test table
table_path="s3a://${S3_BUCKET}/${test_directory}/presto_test_external_fs/"
@@ -55,14 +55,14 @@ retry check_hadoop
pushd "${PROJECT_ROOT}"
set +e
./mvnw -B -pl presto-hive-hadoop2 test -P test-hive-hadoop2-s3 \
--DHADOOP_USER_NAME=hive \
--Dhive.hadoop2.metastoreHost=localhost \
--Dhive.hadoop2.metastorePort=9083 \
--Dhive.hadoop2.databaseName=default \
--Dhive.hadoop2.s3.awsAccessKey="${AWS_ACCESS_KEY_ID}" \
--Dhive.hadoop2.s3.awsSecretKey="${AWS_SECRET_ACCESS_KEY}" \
--Dhive.hadoop2.s3.writableBucket="${S3_BUCKET}" \
--Dhive.hadoop2.s3.testDirectory="${test_directory}"
+-DHADOOP_USER_NAME=hive \
+-Dhive.hadoop2.metastoreHost=localhost \
+-Dhive.hadoop2.metastorePort=9083 \
+-Dhive.hadoop2.databaseName=default \
+-Dhive.hadoop2.s3.awsAccessKey="${AWS_ACCESS_KEY_ID}" \
+-Dhive.hadoop2.s3.awsSecretKey="${AWS_SECRET_ACCESS_KEY}" \
+-Dhive.hadoop2.s3.writableBucket="${S3_BUCKET}" \
+-Dhive.hadoop2.s3.testDirectory="${test_directory}"
EXIT_CODE=$?
set -e
popd
18 changes: 9 additions & 9 deletions presto-hive-hadoop2/bin/run_hive_tests.sh
@@ -22,15 +22,15 @@ HADOOP_MASTER_IP=$(hadoop_master_ip)
pushd "${PROJECT_ROOT}"
set +e
./mvnw -B -pl presto-hive-hadoop2 test -P test-hive-hadoop2 \
--DHADOOP_USER_NAME=hive \
--Dhive.hadoop2.metastoreHost=localhost \
--Dhive.hadoop2.metastorePort=9083 \
--Dhive.hadoop2.databaseName=default \
--Dhive.hadoop2.hiveVersionMajor="${TESTS_HIVE_VERSION_MAJOR}" \
--Dhive.hadoop2.timeZone=Asia/Kathmandu \
--Dhive.metastore.thrift.client.socks-proxy="${PROXY}:1180" \
--Dhive.hdfs.socks-proxy="${PROXY}:1180" \
--Dhadoop-master-ip="${HADOOP_MASTER_IP}"
+-DHADOOP_USER_NAME=hive \
+-Dhive.hadoop2.metastoreHost=localhost \
+-Dhive.hadoop2.metastorePort=9083 \
+-Dhive.hadoop2.databaseName=default \
+-Dhive.hadoop2.hiveVersionMajor="${TESTS_HIVE_VERSION_MAJOR}" \
+-Dhive.hadoop2.timeZone=Asia/Kathmandu \
+-Dhive.metastore.thrift.client.socks-proxy="${PROXY}:1180" \
+-Dhive.hdfs.socks-proxy="${PROXY}:1180" \
+-Dhadoop-master-ip="${HADOOP_MASTER_IP}"
EXIT_CODE=$?
set -e
popd
