Skip to content

Commit

Permalink
Add config file support (apache#6787) (apache#6901)
Browse files Browse the repository at this point in the history
  • Loading branch information
GSharayu authored and harold-kfuse committed May 13, 2021
1 parent 8811ae4 commit 6197a01
Show file tree
Hide file tree
Showing 20 changed files with 139 additions and 529 deletions.
40 changes: 27 additions & 13 deletions compatibility-verifier/compCheck.sh
Original file line number Diff line number Diff line change
Expand Up @@ -70,21 +70,30 @@ function waitForClusterReady() {
sleep 2
echo "Cluster ready."
}

# Emit "-configFileName <path>" when the given config file exists; emit
# nothing otherwise, so callers can splice the result directly into a
# pinot-admin.sh command line.
function setConfigFileArg() {
  local confFile="$1"
  if [[ -f "$confFile" ]]; then
    echo "-configFileName ${confFile}"
  fi
}

# Given a component and directory, start that version of the specific component
function startService() {
serviceName=$1
dirName=$2
local configFileArg=$(setConfigFileArg "$3")
# Upon start, save the pid of the process for a component into a file in /working_dir/{component}.pid, which is then used to stop it
pushd "$dirName"/pinot-tools/target/pinot-tools-pkg/bin || exit 1
if [ "$serviceName" = "zookeeper" ]; then
sh -c 'rm -rf ${0}/zkdir'
sh -c 'echo $$ > $0/zookeeper.pid; exec ./pinot-admin.sh StartZookeeper -dataDir ${0}/zkdir > ${0}/zookeeper.log 2>&1' "${dirName}" &
elif [ "$serviceName" = "controller" ]; then
sh -c 'echo $$ > $0/controller.pid; exec ./pinot-admin.sh StartController > ${0}/controller.log 2>&1' "${dirName}" &
sh -c 'echo $$ > $0/controller.pid; exec ./pinot-admin.sh StartController ${1} > ${0}/controller.log 2>&1' "${dirName}" "${configFileArg}" &
elif [ "$serviceName" = "broker" ]; then
sh -c 'echo $$ > $0/broker.pid; exec ./pinot-admin.sh StartBroker > ${0}/broker.log 2>&1' "${dirName}" &
sh -c 'echo $$ > $0/broker.pid; exec ./pinot-admin.sh StartBroker ${1} > ${0}/broker.log 2>&1' "${dirName}" "${configFileArg}" &
elif [ "$serviceName" = "server" ]; then
sh -c 'echo $$ > $0/server.pid; exec ./pinot-admin.sh StartServer > ${0}/server.log 2>&1' "${dirName}" &
sh -c 'echo $$ > $0/server.pid; exec ./pinot-admin.sh StartServer ${1} > ${0}/server.log 2>&1' "${dirName}" "${configFileArg}" &
elif [ "$serviceName" = "kafka" ]; then
sh -c 'echo $$ > $0/kafka.pid; exec ./pinot-admin.sh StartKafka -zkAddress localhost:2181/kafka > ${0}/kafka.log 2>&1' "${dirName}" &
fi
Expand Down Expand Up @@ -115,11 +124,11 @@ function startServices() {
startService zookeeper "$dirName"
# Controller depends on zookeeper, if not wait zookeeper to be ready, controller will crash.
waitForZkReady
startService controller "$dirName"
startService controller "$dirName" "$CONTROLLER_CONF"
# Broker depends on controller, if not wait controller to be ready, broker will crash.
waitForControllerReady
startService broker "$dirName"
startService server "$dirName"
startService broker "$dirName" "$BROKER_CONF"
startService server "$dirName" "$SERVER_CONF"
startService kafka "$dirName"
waitForKafkaReady
echo "Cluster started."
Expand Down Expand Up @@ -169,6 +178,11 @@ COMPAT_TESTER_PATH="pinot-integration-tests/target/pinot-integration-tests-pkg/b
# create subdirectories for given commits
workingDir=$1
testSuiteDir=$(absPath "$2")

BROKER_CONF=${testSuiteDir}/config/BrokerConfig.conf
CONTROLLER_CONF=${testSuiteDir}/config/ControllerConfig.conf
SERVER_CONF=${testSuiteDir}/config/ServerConfig.conf

oldTargetDir="$workingDir"/oldTargetDir
newTargetDir="$workingDir"/newTargetDir

Expand All @@ -184,28 +198,28 @@ fi

# Setup initial cluster with olderCommit and do rolling upgrade
# Provide abspath of filepath to $COMPAT_TESTER
startServices "$oldTargetDir"
startServices "$oldTargetDir" "${testSuiteDir}/config"
#$COMPAT_TESTER $testSuiteDir/pre-controller-upgrade.yaml 1; if [ $? -ne 0 ]; then exit 1; fi
stopService controller "$oldTargetDir"
startService controller "$newTargetDir"
startService controller "$newTargetDir" "$CONTROLLER_CONF"
waitForControllerReady
#$COMPAT_TESTER $testSuiteDir/pre-broker-upgrade.yaml 2; if [ $? -ne 0 ]; then exit 1; fi
stopService broker "$oldTargetDir"
startService broker "$newTargetDir"
startService broker "$newTargetDir" "$BROKER_CONF"
#$COMPAT_TESTER $testSuiteDir/pre-server-upgrade.yaml 3; if [ $? -ne 0 ]; then exit 1; fi
stopService server "$oldTargetDir"
startService server "$newTargetDir"
startService server "$newTargetDir" "$SERVER_CONF"
#$COMPAT_TESTER $testSuiteDir/post-server-upgrade.yaml 4; if [ $? -ne 0 ]; then exit 1; fi

# Upgrade completed, now do a rollback
stopService server "$newTargetDir"
startService server "$oldTargetDir"
startService server "$oldTargetDir" "$SERVER_CONF"
#$COMPAT_TESTER $testSuiteDir/post-server-rollback.yaml 5; if [ $? -ne 0 ]; then exit 1; fi
stopService broker "$newTargetDir"
startService broker "$oldTargetDir"
startService broker "$oldTargetDir" "$BROKER_CONF"
#$COMPAT_TESTER $testSuiteDir/post-broker-rollback.yaml 6; if [ $? -ne 0 ]; then exit 1; fi
stopService controller "$newTargetDir"
startService controller "$oldTargetDir"
startService controller "$oldTargetDir" "$CONTROLLER_CONF"
waitForControllerReady
#$COMPAT_TESTER $testSuiteDir/post-controller-rollback.yaml 7; if [ $? -ne 0 ]; then exit 1; fi
stopServices "$oldTargetDir"
Expand Down
24 changes: 0 additions & 24 deletions compatibility-verifier/sample-test-suite/config/kafka/topic1.cfg

This file was deleted.

This file was deleted.

This file was deleted.

This file was deleted.

This file was deleted.

41 changes: 13 additions & 28 deletions compatibility-verifier/sample-test-suite/post-broker-rollback.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,32 +18,17 @@
#

# Operations to be done.
description: Operations to be run after Broker downgrade
description: Operations to be run after broker rollback
operations:
- type: tableOp
description: Create table t1
op: CREATE
schemaFileName: /tmp/schema
tableConfigFileName: /tmp/tableConfig
- type: segmentOp
description: Add segments X and Y to T1
op: UPLOAD
inputDataFileName: /tmp/data.csv
tableConfigFileName: /tmp/tableConfig
- type: tableOp
# description: Remove table T1
op: DELETE
tableConfigFileName: /tmp/tableConfig
- type: queryOp
description: Run queries on T1
queryFileName: queries/T1.sql
expectedResultsFileName: queries/T1-results.sql
- type: streamOp
description: Produce rows to topic1
op: PRODUCE
numRows: 66
streamConfigFileName: kafka/topic1.cfg
inputDataFileName: kafka/input.csv
tableConfigFileNames:
- T1
- T2
- type: segmentOp
description: Add segment FeatureTest1_Segment6 to table FeatureTest1
op: UPLOAD
inputDataFileName: data/FeatureTest1-data-00.csv
schemaFileName: FeatureTest1-schema.json
tableConfigFileName: feature-test-1.json
recordReaderConfigFileName: data/recordReaderConfig.json
segmentName: FeatureTest1_Segment6
- type: queryOp
description: Run query on FeatureTest1 using SQL
queryFileName: queries/feature-test-1-sql.queries
expectedResultsFileName: query-results/feature-test-1-rest-sql.results
Original file line number Diff line number Diff line change
Expand Up @@ -18,32 +18,27 @@
#

# Operations to be done.
description: Operations to be run after Broker downgrade
description: Operations to be run after controller rollback
operations:
- type: tableOp
description: Create table t1
op: CREATE
schemaFileName: /tmp/schema
tableConfigFileName: /tmp/tableConfig
- type: segmentOp
description: Add segments X and Y to T1
op: UPLOAD
inputDataFileName: /tmp/data.csv
tableConfigFileName: /tmp/tableConfig
- type: tableOp
# description: Remove table T1
op: DELETE
tableConfigFileName: /tmp/tableConfig
- type: queryOp
description: Run queries on T1
queryFileName: queries/T1.sql
expectedResultsFileName: queries/T1-results.sql
- type: streamOp
description: Produce rows to topic1
op: PRODUCE
numRows: 66
streamConfigFileName: kafka/topic1.cfg
inputDataFileName: kafka/input.csv
tableConfigFileNames:
- T1
- T2
- type: segmentOp
description: Add segment FeatureTest1_Segment7 to table FeatureTest1
op: UPLOAD
inputDataFileName: data/FeatureTest1-data-00.csv
schemaFileName: FeatureTest1-schema.json
tableConfigFileName: feature-test-1.json
recordReaderConfigFileName: data/recordReaderConfig.json
segmentName: FeatureTest1_Segment7
- type: queryOp
description: Run query on FeatureTest1 using SQL
queryFileName: queries/feature-test-1-sql.queries
expectedResultsFileName: query-results/feature-test-1-rest-sql.results
- type: segmentOp
description: Delete segment FeatureTest1_Segment
op: DELETE
tableConfigFileName: feature-test-1.json
segmentName: FeatureTest1_Segment
- type: tableOp
description: Delete table feature-test-1.json
op: DELETE
schemaFileName: FeatureTest1-schema.json
tableConfigFileName: feature-test-1.json
41 changes: 13 additions & 28 deletions compatibility-verifier/sample-test-suite/post-server-rollback.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,32 +18,17 @@
#

# Operations to be done.
description: Operations to be run after Broker downgrade
description: Operations to be run after server rollback
operations:
- type: tableOp
description: Create table t1
op: CREATE
schemaFileName: /tmp/schema
tableConfigFileName: /tmp/tableConfig
- type: segmentOp
description: Add segments X and Y to T1
op: UPLOAD
inputDataFileName: /tmp/data.csv
tableConfigFileName: /tmp/tableConfig
- type: tableOp
# description: Remove table T1
op: DELETE
tableConfigFileName: /tmp/tableConfig
- type: queryOp
description: Run queries on T1
queryFileName: queries/T1.sql
expectedResultsFileName: queries/T1-results.sql
- type: streamOp
description: Produce rows to topic1
op: PRODUCE
numRows: 66
streamConfigFileName: kafka/topic1.cfg
inputDataFileName: kafka/input.csv
tableConfigFileNames:
- T1
- T2
- type: segmentOp
description: Add segment FeatureTest1_Segment5 to table FeatureTest1
op: UPLOAD
inputDataFileName: data/FeatureTest1-data-00.csv
schemaFileName: FeatureTest1-schema.json
tableConfigFileName: feature-test-1.json
recordReaderConfigFileName: data/recordReaderConfig.json
segmentName: FeatureTest1_Segment5
- type: queryOp
description: Run query on FeatureTest1 using SQL
queryFileName: queries/feature-test-1-sql.queries
expectedResultsFileName: query-results/feature-test-1-rest-sql.results

0 comments on commit 6197a01

Please sign in to comment.