From 3d3e9ae38c30b3a00b5104797d50684e1133af5e Mon Sep 17 00:00:00 2001 From: Ivan Pylypenko Date: Wed, 29 Mar 2023 15:26:04 +0300 Subject: [PATCH] K8SPG-294 Parallelization of testing different pg versions --- .../pgo-v1-pg12-operator-aws-openshift-4.yml | 14 + .../pgo-v1-pg12-operator-gke-version.yml | 34 ++ .../pgo-v1-pg13-operator-aws-openshift-4.yml | 14 + .../pgo-v1-pg13-operator-gke-version.yml | 34 ++ .../pgo_v1_operator_aws_openshift-4.groovy | 22 +- cloud/jenkins/pgo_v1_operator_eks.groovy | 22 +- .../pgo_v1_operator_gke_version.groovy | 22 +- ...go_v1_pg12_operator_aws_openshift-4.groovy | 472 ++++++++++++++++++ cloud/jenkins/pgo_v1_pg12_operator_eks.groovy | 400 +++++++++++++++ .../pgo_v1_pg12_operator_gke_version.groovy | 472 ++++++++++++++++++ ...go_v1_pg13_operator_aws_openshift-4.groovy | 472 ++++++++++++++++++ cloud/jenkins/pgo_v1_pg13_operator_eks.groovy | 400 +++++++++++++++ .../pgo_v1_pg13_operator_gke_version.groovy | 472 ++++++++++++++++++ 13 files changed, 2817 insertions(+), 33 deletions(-) create mode 100644 cloud/jenkins/pgo-v1-pg12-operator-aws-openshift-4.yml create mode 100644 cloud/jenkins/pgo-v1-pg12-operator-gke-version.yml create mode 100644 cloud/jenkins/pgo-v1-pg13-operator-aws-openshift-4.yml create mode 100644 cloud/jenkins/pgo-v1-pg13-operator-gke-version.yml create mode 100644 cloud/jenkins/pgo_v1_pg12_operator_aws_openshift-4.groovy create mode 100644 cloud/jenkins/pgo_v1_pg12_operator_eks.groovy create mode 100644 cloud/jenkins/pgo_v1_pg12_operator_gke_version.groovy create mode 100644 cloud/jenkins/pgo_v1_pg13_operator_aws_openshift-4.groovy create mode 100644 cloud/jenkins/pgo_v1_pg13_operator_eks.groovy create mode 100644 cloud/jenkins/pgo_v1_pg13_operator_gke_version.groovy diff --git a/cloud/jenkins/pgo-v1-pg12-operator-aws-openshift-4.yml b/cloud/jenkins/pgo-v1-pg12-operator-aws-openshift-4.yml new file mode 100644 index 0000000000..605e971496 --- /dev/null +++ b/cloud/jenkins/pgo-v1-pg12-operator-aws-openshift-4.yml @@ 
-0,0 +1,14 @@ +- job: + name: pgo-v1-pg12-operator-aws-openshift-4 + project-type: pipeline + description: | + Do not edit this job through the web! + pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - 'master' + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pgo_v1_pg12_operator_aws_openshift-4.groovy diff --git a/cloud/jenkins/pgo-v1-pg12-operator-gke-version.yml b/cloud/jenkins/pgo-v1-pg12-operator-gke-version.yml new file mode 100644 index 0000000000..ff320a6821 --- /dev/null +++ b/cloud/jenkins/pgo-v1-pg12-operator-gke-version.yml @@ -0,0 +1,34 @@ +- job: + name: pgo-v1-pg12-operator-gke-version + project-type: pipeline + description: | + Do not edit this job through the web! + concurrent: false + properties: + - build-discarder: + days-to-keep: -1 + num-to-keep: 10 + artifact-days-to-keep: -1 + artifact-num-to-keep: 10 + pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - master + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pgo_v1_pg12_operator_gke_version.groovy + parameters: + - string: + name: GIT_REPO + default: https://github.com/percona/percona-postgresql-operator + description: percona/percona-postgresql-operator repository + - string: + name: GIT_BRANCH + default: release-0.1.0 + description: Tag/Branch for percona/percona-postgresql-operator repository + - string: + name: GKE_VERSION + default: '1.23' + description: GKE version diff --git a/cloud/jenkins/pgo-v1-pg13-operator-aws-openshift-4.yml b/cloud/jenkins/pgo-v1-pg13-operator-aws-openshift-4.yml new file mode 100644 index 0000000000..66cdd72d07 --- /dev/null +++ b/cloud/jenkins/pgo-v1-pg13-operator-aws-openshift-4.yml @@ -0,0 +1,14 @@ +- job: + name: pgo-v1-pg13-operator-aws-openshift-4 + project-type: pipeline + description: | + Do not edit this job through the web! 
+ pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - 'master' + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pgo_v1_pg13_operator_aws_openshift-4.groovy diff --git a/cloud/jenkins/pgo-v1-pg13-operator-gke-version.yml b/cloud/jenkins/pgo-v1-pg13-operator-gke-version.yml new file mode 100644 index 0000000000..9335d26452 --- /dev/null +++ b/cloud/jenkins/pgo-v1-pg13-operator-gke-version.yml @@ -0,0 +1,34 @@ +- job: + name: pgo-v1-pg13-operator-gke-version + project-type: pipeline + description: | + Do not edit this job through the web! + concurrent: false + properties: + - build-discarder: + days-to-keep: -1 + num-to-keep: 10 + artifact-days-to-keep: -1 + artifact-num-to-keep: 10 + pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - master + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pgo_v1_pg13_operator_gke_version.groovy + parameters: + - string: + name: GIT_REPO + default: https://github.com/percona/percona-postgresql-operator + description: percona/percona-postgresql-operator repository + - string: + name: GIT_BRANCH + default: release-0.1.0 + description: Tag/Branch for percona/percona-postgresql-operator repository + - string: + name: GKE_VERSION + default: '1.23' + description: GKE version diff --git a/cloud/jenkins/pgo_v1_operator_aws_openshift-4.groovy b/cloud/jenkins/pgo_v1_operator_aws_openshift-4.groovy index 98fcf6cd4d..80c95cc86d 100644 --- a/cloud/jenkins/pgo_v1_operator_aws_openshift-4.groovy +++ b/cloud/jenkins/pgo_v1_operator_aws_openshift-4.groovy @@ -249,47 +249,47 @@ pipeline { name: 'PG_VERSION') string( defaultValue: '', - description: 'Operator image: perconalab/percona-postgresql-operator:main-postgres-operator', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', name: 'PGO_OPERATOR_IMAGE') string( defaultValue: '', - 
description: 'Operators API server image: perconalab/percona-postgresql-operator:main-pgo-apiserver', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', name: 'PGO_APISERVER_IMAGE') string( defaultValue: '', - description: 'Operators event server image: perconalab/percona-postgresql-operator:main-pgo-event', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', name: 'PGO_EVENT_IMAGE') string( defaultValue: '', - description: 'Operators rmdata image: perconalab/percona-postgresql-operator:main-pgo-rmdata', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', name: 'PGO_RMDATA_IMAGE') string( defaultValue: '', - description: 'Operators scheduler image: perconalab/percona-postgresql-operator:main-pgo-scheduler', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', name: 'PGO_SCHEDULER_IMAGE') string( defaultValue: '', - description: 'Operators deployer image: perconalab/percona-postgresql-operator:main-pgo-deployer', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', name: 'PGO_DEPLOYER_IMAGE') string( defaultValue: '', - description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg13-pgbouncer', + description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg14-pgbouncer', name: 'PGO_PGBOUNCER_IMAGE') string( defaultValue: '', - description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg13-postgres-ha', + description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg14-postgres-ha', name: 'PGO_POSTGRES_HA_IMAGE') string( defaultValue: '', - description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest', + description: 'Operators backrest utility image: 
perconalab/percona-postgresql-operator:main-ppg14-pgbackrest', name: 'PGO_BACKREST_IMAGE') string( defaultValue: '', - description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest-repo', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo', name: 'PGO_BACKREST_REPO_IMAGE') string( defaultValue: '', - description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg13-pgbadger', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg14-pgbadger', name: 'PGO_PGBADGER_IMAGE') string( defaultValue: 'perconalab/pmm-server', diff --git a/cloud/jenkins/pgo_v1_operator_eks.groovy b/cloud/jenkins/pgo_v1_operator_eks.groovy index f2f8d551ea..80c497c745 100644 --- a/cloud/jenkins/pgo_v1_operator_eks.groovy +++ b/cloud/jenkins/pgo_v1_operator_eks.groovy @@ -171,47 +171,47 @@ pipeline { name: 'PG_VERSION') string( defaultValue: '', - description: 'Operator image: perconalab/percona-postgresql-operator:main-postgres-operator', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', name: 'PGO_OPERATOR_IMAGE') string( defaultValue: '', - description: 'Operators API server image: perconalab/percona-postgresql-operator:main-pgo-apiserver', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', name: 'PGO_APISERVER_IMAGE') string( defaultValue: '', - description: 'Operators event server image: perconalab/percona-postgresql-operator:main-pgo-event', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', name: 'PGO_EVENT_IMAGE') string( defaultValue: '', - description: 'Operators rmdata image: perconalab/percona-postgresql-operator:main-pgo-rmdata', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', name: 'PGO_RMDATA_IMAGE') string( 
defaultValue: '', - description: 'Operators scheduler image: perconalab/percona-postgresql-operator:main-pgo-scheduler', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', name: 'PGO_SCHEDULER_IMAGE') string( defaultValue: '', - description: 'Operators deployer image: perconalab/percona-postgresql-operator:main-pgo-deployer', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', name: 'PGO_DEPLOYER_IMAGE') string( defaultValue: '', - description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg13-pgbouncer', + description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg14-pgbouncer', name: 'PGO_PGBOUNCER_IMAGE') string( defaultValue: '', - description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg13-postgres-ha', + description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg14-postgres-ha', name: 'PGO_POSTGRES_HA_IMAGE') string( defaultValue: '', - description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg14-pgbackrest', name: 'PGO_BACKREST_IMAGE') string( defaultValue: '', - description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest-repo', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo', name: 'PGO_BACKREST_REPO_IMAGE') string( defaultValue: '', - description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg13-pgbadger', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg14-pgbadger', name: 'PGO_PGBADGER_IMAGE') string( defaultValue: 'perconalab/pmm-server', diff --git a/cloud/jenkins/pgo_v1_operator_gke_version.groovy 
b/cloud/jenkins/pgo_v1_operator_gke_version.groovy index c4fdd87643..235781c9b6 100644 --- a/cloud/jenkins/pgo_v1_operator_gke_version.groovy +++ b/cloud/jenkins/pgo_v1_operator_gke_version.groovy @@ -252,47 +252,47 @@ pipeline { name: 'IS_GKE_ALPHA') string( defaultValue: '', - description: 'Operator image: perconalab/percona-postgresql-operator:main-postgres-operator', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', name: 'PGO_OPERATOR_IMAGE') string( defaultValue: '', - description: 'Operators API server image: perconalab/percona-postgresql-operator:main-pgo-apiserver', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', name: 'PGO_APISERVER_IMAGE') string( defaultValue: '', - description: 'Operators event server image: perconalab/percona-postgresql-operator:main-pgo-event', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', name: 'PGO_EVENT_IMAGE') string( defaultValue: '', - description: 'Operators rmdata image: perconalab/percona-postgresql-operator:main-pgo-rmdata', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', name: 'PGO_RMDATA_IMAGE') string( defaultValue: '', - description: 'Operators scheduler image: perconalab/percona-postgresql-operator:main-pgo-scheduler', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', name: 'PGO_SCHEDULER_IMAGE') string( defaultValue: '', - description: 'Operators deployer image: perconalab/percona-postgresql-operator:main-pgo-deployer', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', name: 'PGO_DEPLOYER_IMAGE') string( defaultValue: '', - description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg13-pgbouncer', + description: 'Operators pgBouncer image: 
perconalab/percona-postgresql-operator:main-ppg14-pgbouncer', name: 'PGO_PGBOUNCER_IMAGE') string( defaultValue: '', - description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg13-postgres-ha', + description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg14-postgres-ha', name: 'PGO_POSTGRES_HA_IMAGE') string( defaultValue: '', - description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg14-pgbackrest', name: 'PGO_BACKREST_IMAGE') string( defaultValue: '', - description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest-repo', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg14-pgbackrest-repo', name: 'PGO_BACKREST_REPO_IMAGE') string( defaultValue: '', - description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg13-pgbadger', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg14-pgbadger', name: 'PGO_PGBADGER_IMAGE') string( defaultValue: 'perconalab/pmm-server', diff --git a/cloud/jenkins/pgo_v1_pg12_operator_aws_openshift-4.groovy b/cloud/jenkins/pgo_v1_pg12_operator_aws_openshift-4.groovy new file mode 100644 index 0000000000..9876bdaa4b --- /dev/null +++ b/cloud/jenkins/pgo_v1_pg12_operator_aws_openshift-4.groovy @@ -0,0 +1,472 @@ +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch ${FILE_NAME} + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 ls \$S3_PATH/${FILE_NAME} || : + aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : + """ + } +} + +void popArtifactFile(String FILE_NAME) { + echo "Try to get $FILE_NAME file from S3!" + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : + """ + } +} + +TestsReport = '\n' +testsReportMap = [:] +void makeReport() { + for ( test in testsReportMap ) { + TestsReport = TestsReport + "<${test.value}/>\n" + } + TestsReport = TestsReport + '\n' +} + +void CreateCluster(String CLUSTER_SUFFIX){ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift4-secrets', variable: 'OPENSHIFT_CONF_FILE')]) { + sh """ + platform_version=`echo "\${params.PLATFORM_VER}" | awk -F. '{ printf("%d%03d%03d%03d\\n", \$1,\$2,\$3,\$4); }';` + version=`echo "4.12.0" | awk -F. 
'{ printf("%d%03d%03d%03d\\n", \$1,\$2,\$3,\$4); }';` + if [ \$platform_version -ge \$version ];then + POLICY="additionalTrustBundlePolicy: Proxyonly" + NETWORK_TYPE="OVNKubernetes" + else + POLICY="" + NETWORK_TYPE="OpenShiftSDN" + fi + mkdir -p openshift/${CLUSTER_SUFFIX} +cat <<-EOF > ./openshift/${CLUSTER_SUFFIX}/install-config.yaml +\$POLICY +apiVersion: v1 +baseDomain: cd.percona.com +compute: +- architecture: amd64 + hyperthreading: Enabled + name: worker + platform: + aws: + type: m5.2xlarge + replicas: 3 +controlPlane: + architecture: amd64 + hyperthreading: Enabled + name: master + platform: {} + replicas: 1 +metadata: + creationTimestamp: null + name: openshift4-par-pgo-jenkins-${CLUSTER_SUFFIX} +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + machineNetwork: + - cidr: 10.0.0.0/16 + networkType: \$NETWORK_TYPE + serviceNetwork: + - 172.30.0.0/16 +platform: + aws: + region: eu-west-3 + userTags: + iit-billing-tag: openshift + delete-cluster-after-hours: 8 + team: cloud + product: pgo-v1-operator + job: ${env.JOB_NAME} + build: ${env.BUILD_NUMBER} + +publish: External +EOF + cat $OPENSHIFT_CONF_FILE >> ./openshift/${CLUSTER_SUFFIX}/install-config.yaml + """ + + sshagent(['aws-openshift-41-key']) { + sh """ + /usr/local/bin/openshift-install create cluster --dir=./openshift/${CLUSTER_SUFFIX} + export KUBECONFIG=./openshift/${CLUSTER_SUFFIX}/auth/kubeconfig + + """ + } + } +} + +void ShutdownCluster(String CLUSTER_SUFFIX) { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) { + sshagent(['aws-openshift-41-key']) { + sh """ + /usr/local/bin/openshift-install destroy cluster --dir=./openshift/${CLUSTER_SUFFIX} + """ + } + } + +} + +void runTest(String TEST_NAME, String CLUSTER_SUFFIX) { + 
def retryCount = 0 + waitUntil { + try { + echo "The $TEST_NAME test was started!" + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() + testsReportMap[TEST_NAME] = 'failure' + PPG_TAG = sh(script: "if [ -n \"\${PGO_POSTGRES_HA_IMAGE}\" ] ; then echo ${PGO_POSTGRES_HA_IMAGE} | awk -F':' '{print \$2}' | grep -oE '[A-Za-z0-9\\.]+-ppg[0-9]{2}' ; else echo 'main-ppg13'; fi", , returnStdout: true).trim() + + popArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + + sh """ + if [ -f "${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG" ]; then + echo Skip $TEST_NAME test + else + cd ./source + if [ -n "${PG_VERSION}" ]; then + export PG_VER=${PG_VERSION} + fi + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + export IMAGE_OPERATOR=${PGO_OPERATOR_IMAGE} + else + export IMAGE_OPERATOR=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-postgres-operator + fi + + if [ -n "${PGO_APISERVER_IMAGE}" ]; then + export IMAGE_APISERVER=${PGO_APISERVER_IMAGE} + else + export IMAGE_APISERVER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-apiserver + fi + + if [ -n "${PGO_EVENT_IMAGE}" ]; then + export IMAGE_PGOEVENT=${PGO_EVENT_IMAGE} + else + export IMAGE_PGOEVENT=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-event + fi + + if [ -n "${PGO_RMDATA_IMAGE}" ]; then + export IMAGE_RMDATA=${PGO_RMDATA_IMAGE} + else + export IMAGE_RMDATA=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-rmdata + fi + + if [ -n "${PGO_SCHEDULER_IMAGE}" ]; then + export IMAGE_SCHEDULER=${PGO_SCHEDULER_IMAGE} + else + export IMAGE_SCHEDULER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-scheduler + fi + + if [ -n "${PGO_DEPLOYER_IMAGE}" ]; then + export IMAGE_DEPLOYER=${PGO_DEPLOYER_IMAGE} + else + export IMAGE_DEPLOYER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-deployer + fi + + if [ -n "${PGO_PGBOUNCER_IMAGE}" ]; then + export 
IMAGE_PGBOUNCER=${PGO_PGBOUNCER_IMAGE} + fi + + if [ -n "${PGO_POSTGRES_HA_IMAGE}" ]; then + export IMAGE_PG_HA=${PGO_POSTGRES_HA_IMAGE} + export PG_VER=\$(echo \${IMAGE_PG_HA} | grep -Eo 'ppg[0-9]+'| sed 's/ppg//g') + fi + + if [ -n "${PGO_BACKREST_IMAGE}" ]; then + export IMAGE_BACKREST=${PGO_BACKREST_IMAGE} + fi + + if [ -n "${PGO_BACKREST_REPO_IMAGE}" ]; then + export IMAGE_BACKREST_REPO=${PGO_BACKREST_REPO_IMAGE} + fi + + if [ -n "${PGO_PGBADGER_IMAGE}" ]; then + export IMAGE_PGBADGER=${PGO_PGBADGER_IMAGE} + fi + + if [ -n "${PMM_SERVER_IMAGE_BASE}" ]; then + export IMAGE_PMM_SERVER_REPO=${PMM_SERVER_IMAGE_BASE} + fi + + if [ -n "${PMM_SERVER_IMAGE_TAG}" ]; then + export IMAGE_PMM_SERVER_TAG=${PMM_SERVER_IMAGE_TAG} + fi + + if [ -n "${PMM_CLIENT_IMAGE}" ]; then + export IMAGE_PMM=${PMM_CLIENT_IMAGE} + fi + + source $HOME/google-cloud-sdk/path.bash.inc + export KUBECONFIG=$WORKSPACE/openshift/${CLUSTER_SUFFIX}/auth/kubeconfig + oc whoami + + ./e2e-tests/$TEST_NAME/run + fi + """ + pushArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + testsReportMap[TEST_NAME] = 'passed' + return true + } + catch (exc) { + if (retryCount >= 2) { + currentBuild.result = 'FAILURE' + return true + } + retryCount++ + return false + } + } + + echo "The $TEST_NAME test was finished!" 
+} +void installRpms() { + sh """ + sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true + sudo percona-release enable-only tools + sudo yum install -y percona-xtrabackup-80 jq | true + """ +} +pipeline { + parameters { + string( + defaultValue: '4.10.54', + description: 'OpenShift version to use', + name: 'OS_VERSION') + string( + defaultValue: '1.x', + description: 'Tag/Branch for percona/percona-postgresql-operator repository', + name: 'GIT_BRANCH') + string( + defaultValue: 'https://github.com/percona/percona-postgresql-operator', + description: 'percona-postgresql-operator repository', + name: 'GIT_REPO') + string( + defaultValue: '12', + description: 'PG version', + name: 'PG_VERSION') + string( + defaultValue: '', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', + name: 'PGO_OPERATOR_IMAGE') + string( + defaultValue: '', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', + name: 'PGO_APISERVER_IMAGE') + string( + defaultValue: '', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', + name: 'PGO_EVENT_IMAGE') + string( + defaultValue: '', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', + name: 'PGO_RMDATA_IMAGE') + string( + defaultValue: '', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', + name: 'PGO_SCHEDULER_IMAGE') + string( + defaultValue: '', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', + name: 'PGO_DEPLOYER_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg12-pgbouncer', + name: 'PGO_PGBOUNCER_IMAGE') + string( + defaultValue: '', + description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg12-postgres-ha', + 
name: 'PGO_POSTGRES_HA_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg12-pgbackrest', + name: 'PGO_BACKREST_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg12-pgbackrest-repo', + name: 'PGO_BACKREST_REPO_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg12-pgbadger', + name: 'PGO_PGBADGER_IMAGE') + string( + defaultValue: 'perconalab/pmm-server', + description: 'PMM server image base: perconalab/pmm-server', + name: 'PMM_SERVER_IMAGE_BASE') + string( + defaultValue: 'dev-latest', + description: 'PMM server image tag: dev-latest', + name: 'PMM_SERVER_IMAGE_TAG') + string( + defaultValue: 'perconalab/pmm-client:dev-latest', + description: 'PMM server image: perconalab/pmm-client:dev-latest', + name: 'PMM_CLIENT_IMAGE') + } + environment { + TF_IN_AUTOMATION = 'true' + CLEAN_NAMESPACE = 1 + } + agent { + label 'docker' + } + options { + buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '10', artifactNumToKeepStr: '10')) + skipDefaultCheckout() + disableConcurrentBuilds() + } + + stages { + stage('Prepare') { + steps { + sh """ + wget https://releases.hashicorp.com/terraform/0.11.14/terraform_0.11.14_linux_amd64.zip + unzip terraform_0.11.14_linux_amd64.zip + sudo mv terraform /usr/local/bin/ && rm terraform_0.11.14_linux_amd64.zip + """ + installRpms() + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh ''' + if [ ! 
-d $HOME/google-cloud-sdk/bin ]; then + rm -rf $HOME/google-cloud-sdk + curl https://sdk.cloud.google.com | bash + fi + + source $HOME/google-cloud-sdk/path.bash.inc + gcloud components update kubectl + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud version + + curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - + + curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OS_VERSION/openshift-client-linux-$OS_VERSION.tar.gz \ + | sudo tar -C /usr/local/bin --wildcards -zxvpf - + curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OS_VERSION/openshift-install-linux-$OS_VERSION.tar.gz \ + | sudo tar -C /usr/local/bin --wildcards -zxvpf - + + sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/3.3.2/yq_linux_amd64 > /usr/local/bin/yq" + sudo chmod +x /usr/local/bin/yq + ''' + } + + } + } + stage('Build docker image') { + steps { + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER'), file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'),file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh ''' + sudo sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + ./cloud/local/checkout $GIT_REPO $GIT_BRANCH + + cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE ./source/e2e-tests/conf/cloud-secret-minio-gw.yml + + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + echo "SKIP: Build is not needed, PG operator image was set!" 
+ else + cd ./source/ + sg docker -c " + docker login -u '${USER}' -p '${PASS}' + export IMAGE_URI_BASE=perconalab/percona-postgresql-operator:$GIT_BRANCH + ./e2e-tests/build + docker logout + " + sudo rm -rf ./build + fi + ''' + } + } + } + stage('Run tests') { + parallel { + stage('E2E Basic tests') { + steps { + CreateCluster('$PG_VERSION-sandbox') + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + runTest('init-deploy', '$PG_VERSION-sandbox') + } + runTest('scaling', '$PG_VERSION-sandbox') + runTest('recreate', '$PG_VERSION-sandbox') + runTest('affinity', '$PG_VERSION-sandbox') + runTest('monitoring', '$PG_VERSION-sandbox') + runTest('self-healing', '$PG_VERSION-sandbox') + runTest('operator-self-healing', '$PG_VERSION-sandbox') + runTest('clone-cluster', '$PG_VERSION-sandbox') + runTest('tls-check', '$PG_VERSION-sandbox') + runTest('users', '$PG_VERSION-sandbox') + runTest('ns-mode', '$PG_VERSION-sandbox') + ShutdownCluster('$PG_VERSION-sandbox') + } + } + stage('E2E demand-backup') { + steps { + CreateCluster('$PG_VERSION-demand-backup') + runTest('demand-backup', '$PG_VERSION-demand-backup') + ShutdownCluster('$PG_VERSION-demand-backup') + } + } + stage('E2E scheduled-backup') { + steps { + CreateCluster('$PG_VERSION-scheduled-backup') + runTest('scheduled-backup', '$PG_VERSION-scheduled-backup') + ShutdownCluster('$PG_VERSION-scheduled-backup') + } + } + stage('E2E Upgrade') { + steps { + CreateCluster('$PG_VERSION-upgrade') + runTest('upgrade', '$PG_VERSION-upgrade') + runTest('smart-update', '$PG_VERSION-upgrade') + ShutdownCluster('$PG_VERSION-upgrade') + } + } + stage('E2E Version-service') { + steps { + CreateCluster('$PG_VERSION-version-service') + runTest('version-service', '$PG_VERSION-version-service') + ShutdownCluster('$PG_VERSION-version-service') + } + } + } + } + stage('Make report') { + steps { + makeReport() + sh """ + echo 
"${TestsReport}" > TestsReport.xml + """ + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml' + } + } + } + + post { + always { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) { + sshagent(['aws-openshift-41-key']) { + sh """ + for cluster_suffix in '${params.PG_VERSION}-sandbox' '${params.PG_VERSION}-demand-backup' '${params.PG_VERSION}-scheduled-backup' '${params.PG_VERSION}-upgrade' '${params.PG_VERSION}-version-service' + do + /usr/local/bin/openshift-install destroy cluster --dir=./openshift/\$cluster_suffix > /dev/null 2>&1 || true + done + """ + } + } + + sh ''' + sudo docker rmi -f \$(sudo docker images -q) || true + sudo rm -rf $HOME/google-cloud-sdk + sudo rm -rf ./* + ''' + deleteDir() + } + } +} diff --git a/cloud/jenkins/pgo_v1_pg12_operator_eks.groovy b/cloud/jenkins/pgo_v1_pg12_operator_eks.groovy new file mode 100644 index 0000000000..3178f43481 --- /dev/null +++ b/cloud/jenkins/pgo_v1_pg12_operator_eks.groovy @@ -0,0 +1,400 @@ +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch ${FILE_NAME} + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 ls \$S3_PATH/${FILE_NAME} || : + aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : + """ + } +} + +void popArtifactFile(String FILE_NAME) { + echo "Try to get $FILE_NAME file from S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : + """ + } +} + +TestsReport = '\n' +testsReportMap = [:] +void makeReport() { + for ( test in testsReportMap ) { + TestsReport = TestsReport + "<${test.value}/>\n" + } + TestsReport = TestsReport + '\n' +} + +void runTest(String TEST_NAME) { + def retryCount = 0 + waitUntil { + try { + echo "The $TEST_NAME test was started!" + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() + testsReportMap[TEST_NAME] = 'failure' + PPG_TAG = sh(script: "if [ -n \"\${PGO_POSTGRES_HA_IMAGE}\" ] ; then echo ${PGO_POSTGRES_HA_IMAGE} | awk -F':' '{print \$2}' | grep -oE '[A-Za-z0-9\\.]+-ppg[0-9]{2}' ; else echo 'main-ppg13'; fi", , returnStdout: true).trim() + + popArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd'], file(credentialsId: 'eks-conf-file', variable: 'EKS_CONF_FILE')]) { + sh """ + if [ -f "${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG" ]; then + echo Skip $TEST_NAME test + else + cd ./source + if [ -n "${PG_VERSION}" ]; then + export PG_VER=${PG_VERSION} + fi + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + export IMAGE_OPERATOR=${PGO_OPERATOR_IMAGE} + else + export IMAGE_OPERATOR=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-postgres-operator + fi + + if [ -n "${PGO_APISERVER_IMAGE}" ]; then + export IMAGE_APISERVER=${PGO_APISERVER_IMAGE} + else + export IMAGE_APISERVER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-apiserver + fi + + if [ -n "${PGO_EVENT_IMAGE}" ]; then + export 
IMAGE_PGOEVENT=${PGO_EVENT_IMAGE} + else + export IMAGE_PGOEVENT=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-event + fi + + if [ -n "${PGO_RMDATA_IMAGE}" ]; then + export IMAGE_RMDATA=${PGO_RMDATA_IMAGE} + else + export IMAGE_RMDATA=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-rmdata + fi + + if [ -n "${PGO_SCHEDULER_IMAGE}" ]; then + export IMAGE_SCHEDULER=${PGO_SCHEDULER_IMAGE} + else + export IMAGE_SCHEDULER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-scheduler + fi + + if [ -n "${PGO_DEPLOYER_IMAGE}" ]; then + export IMAGE_DEPLOYER=${PGO_DEPLOYER_IMAGE} + else + export IMAGE_DEPLOYER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-deployer + fi + + if [ -n "${PGO_PGBOUNCER_IMAGE}" ]; then + export IMAGE_PGBOUNCER=${PGO_PGBOUNCER_IMAGE} + fi + + if [ -n "${PGO_POSTGRES_HA_IMAGE}" ]; then + export IMAGE_PG_HA=${PGO_POSTGRES_HA_IMAGE} + export PG_VER=\$(echo \${IMAGE_PG_HA} | grep -Eo 'ppg[0-9]+'| sed 's/ppg//g') + fi + + if [ -n "${PGO_BACKREST_IMAGE}" ]; then + export IMAGE_BACKREST=${PGO_BACKREST_IMAGE} + fi + + if [ -n "${PGO_BACKREST_REPO_IMAGE}" ]; then + export IMAGE_BACKREST_REPO=${PGO_BACKREST_REPO_IMAGE} + fi + + if [ -n "${PGO_PGBADGER_IMAGE}" ]; then + export IMAGE_PGBADGER=${PGO_PGBADGER_IMAGE} + fi + + if [ -n "${PMM_SERVER_IMAGE_BASE}" ]; then + export IMAGE_PMM_SERVER_REPO=${PMM_SERVER_IMAGE_BASE} + fi + + if [ -n "${PMM_SERVER_IMAGE_TAG}" ]; then + export IMAGE_PMM_SERVER_TAG=${PMM_SERVER_IMAGE_TAG} + fi + + if [ -n "${PMM_CLIENT_IMAGE}" ]; then + export IMAGE_PMM=${PMM_CLIENT_IMAGE} + fi + + export PATH=/home/ec2-user/.local/bin:$PATH + source $HOME/google-cloud-sdk/path.bash.inc + export KUBECONFIG=~/.kube/config + + ./e2e-tests/$TEST_NAME/run + fi + """ + } + pushArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + testsReportMap[TEST_NAME] = 'passed' + return true + } + catch (exc) { + if (retryCount >= 2) { + currentBuild.result = 'FAILURE' + return true + 
} + retryCount++ + return false + } + } + + echo "The $TEST_NAME test was finished!" +} +void installRpms() { + sh """ + sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true + sudo percona-release enable-only tools + sudo yum install -y jq | true + """ +} +pipeline { + parameters { + string( + defaultValue: '1.21', + description: 'Kubernetes target version', + name: 'KUBEVERSION') + string( + defaultValue: '1.x', + description: 'Tag/Branch for percona/percona-postgresql-operator repository', + name: 'GIT_BRANCH') + string( + defaultValue: 'https://github.com/percona/percona-postgresql-operator', + description: 'percona-postgresql-operator repository', + name: 'GIT_REPO') + string( + defaultValue: '12', + description: 'PG version', + name: 'PG_VERSION') + string( + defaultValue: '', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', + name: 'PGO_OPERATOR_IMAGE') + string( + defaultValue: '', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', + name: 'PGO_APISERVER_IMAGE') + string( + defaultValue: '', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', + name: 'PGO_EVENT_IMAGE') + string( + defaultValue: '', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', + name: 'PGO_RMDATA_IMAGE') + string( + defaultValue: '', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', + name: 'PGO_SCHEDULER_IMAGE') + string( + defaultValue: '', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', + name: 'PGO_DEPLOYER_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg12-pgbouncer', + name: 'PGO_PGBOUNCER_IMAGE') + string( + defaultValue: '', + description: 'Operators postgres image: 
perconalab/percona-postgresql-operator:main-ppg12-postgres-ha', + name: 'PGO_POSTGRES_HA_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg12-pgbackrest', + name: 'PGO_BACKREST_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg12-pgbackrest-repo', + name: 'PGO_BACKREST_REPO_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg12-pgbadger', + name: 'PGO_PGBADGER_IMAGE') + string( + defaultValue: 'perconalab/pmm-server', + description: 'PMM server image base: perconalab/pmm-server', + name: 'PMM_SERVER_IMAGE_BASE') + string( + defaultValue: 'dev-latest', + description: 'PMM server image tag: dev-latest', + name: 'PMM_SERVER_IMAGE_TAG') + string( + defaultValue: 'perconalab/pmm-client:dev-latest', + description: 'PMM server image: perconalab/pmm-client:dev-latest', + name: 'PMM_CLIENT_IMAGE') + } + agent { + label 'docker' + } + options { + buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '10', artifactNumToKeepStr: '10')) + skipDefaultCheckout() + disableConcurrentBuilds() + } + + stages { + stage('Prepare') { + steps { + installRpms() + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh ''' + if [ ! 
-d $HOME/google-cloud-sdk/bin ]; then + rm -rf $HOME/google-cloud-sdk + curl https://sdk.cloud.google.com | bash + fi + + source $HOME/google-cloud-sdk/path.bash.inc + gcloud components update kubectl + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud version + + curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - + + curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp + sudo mv -v /tmp/eksctl /usr/local/bin + + sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/3.3.2/yq_linux_amd64 > /usr/local/bin/yq" + sudo chmod +x /usr/local/bin/yq + ''' + } + } + } + stage('Build docker image') { + steps { + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER'), file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh ''' + sudo sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + ./cloud/local/checkout $GIT_REPO $GIT_BRANCH + + cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE ./source/e2e-tests/conf/cloud-secret-minio-gw.yml + + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + echo "SKIP: Build is not needed, PGO operator image was set!" 
+ else + cd ./source/ + sg docker -c " + docker login -u '${USER}' -p '${PASS}' + export IMAGE_URI_BASE=perconalab/percona-postgresql-operator:$GIT_BRANCH + ./e2e-tests/build + docker logout + " + sudo rm -rf ./build + fi + ''' + } + } + } + stage('Create EKS Infrastructure') { + steps { + sh ''' +cat <<-EOF > cluster.yaml +# An example of ClusterConfig showing nodegroups with mixed instances (spot and on demand): +--- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-pgo-pg$PG_VERSION-cluster + region: eu-west-3 + version: '$KUBEVERSION' + +iam: + withOIDC: true + +addons: +- name: aws-ebs-csi-driver + wellKnownPolicies: + ebsCSIController: true + +nodeGroups: + - name: ng-1 + minSize: 3 + maxSize: 5 + instancesDistribution: + maxPrice: 0.15 + instanceTypes: ["m5.xlarge", "m5.2xlarge"] # At least two instance types should be specified + onDemandBaseCapacity: 0 + onDemandPercentageAboveBaseCapacity: 50 + spotInstancePools: 2 + tags: + 'iit-billing-tag': 'jenkins-eks' + team: cloud + product: pgo-v1-operator + job: $JOB_NAME + build: '$BUILD_NUMBER' +EOF + ''' + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + export PATH=/home/ec2-user/.local/bin:$PATH + source $HOME/google-cloud-sdk/path.bash.inc + + eksctl create cluster -f cluster.yaml + """ + } + stash includes: 'cluster.yaml', name: 'cluster_conf' + } + } + stage('Run Tests') { + environment { + CLEAN_NAMESPACE = 1 + } + steps { + runTest('init-deploy') + runTest('scaling') + runTest('recreate') + runTest('affinity') + runTest('monitoring') + // runTest('self-healing') + // runTest('operator-self-healing') + runTest('demand-backup') + runTest('scheduled-backup') + runTest('upgrade') + runTest('smart-update') + runTest('version-service') + runTest('users') + runTest('ns-mode') + } + } + stage('Make report') { + steps { + makeReport() + sh """ 
+ echo "${TestsReport}" > TestsReport.xml + """ + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml' + } + } + } + + post { + always { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + unstash 'cluster_conf' + sh ''' + eksctl delete addon --name aws-ebs-csi-driver --cluster eks-pgo-pg$PG_VERSION-cluster --region eu-west-3 + eksctl delete cluster -f cluster.yaml --wait --force + ''' + } + + sh ''' + sudo docker rmi -f \$(sudo docker images -q) || true + sudo rm -rf $HOME/google-cloud-sdk + sudo rm -rf ./* + ''' + deleteDir() + } + } +} diff --git a/cloud/jenkins/pgo_v1_pg12_operator_gke_version.groovy b/cloud/jenkins/pgo_v1_pg12_operator_gke_version.groovy new file mode 100644 index 0000000000..27f0117c52 --- /dev/null +++ b/cloud/jenkins/pgo_v1_pg12_operator_gke_version.groovy @@ -0,0 +1,472 @@ +GKERegion='us-central1-c' + +void CreateCluster(String CLUSTER_PREFIX) { + if ( "${params.IS_GKE_ALPHA}" == "YES" ) { + runGKEclusterAlpha(CLUSTER_PREFIX) + } else { + runGKEcluster(CLUSTER_PREFIX) + } +} +void runGKEcluster(String CLUSTER_PREFIX) { + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh """ + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + export USE_GKE_GCLOUD_AUTH_PLUGIN=True + source $HOME/google-cloud-sdk/path.bash.inc + ret_num=0 + while [ \${ret_num} -lt 15 ]; do + ret_val=0 + gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE && \ + gcloud config set project $GCP_PROJECT && \ + gcloud container clusters create --zone ${GKERegion} \$(echo $CLUSTER_NAME-${CLUSTER_PREFIX} | cut -c-40) --cluster-version $GKE_VERSION --machine-type n1-standard-4 --preemptible --num-nodes=3 --network=jenkins-pg-vpc 
--subnetwork=jenkins-pg-${CLUSTER_PREFIX} --no-enable-autoupgrade --cluster-ipv4-cidr=/21 --labels delete-cluster-after-hours=6 && \ + kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user jenkins@"$GCP_PROJECT".iam.gserviceaccount.com || ret_val=\$? + if [ \${ret_val} -eq 0 ]; then break; fi + ret_num=\$((ret_num + 1)) + done + if [ \${ret_num} -eq 15 ]; then exit 1; fi + """ + } +} +void runGKEclusterAlpha(String CLUSTER_PREFIX) { + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh """ + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + export USE_GKE_GCLOUD_AUTH_PLUGIN=True + source $HOME/google-cloud-sdk/path.bash.inc + ret_num=0 + while [ \${ret_num} -lt 15 ]; do + ret_val=0 + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE && \ + gcloud config set project $GCP_PROJECT && \ + gcloud alpha container clusters create --release-channel rapid \$(echo $CLUSTER_NAME-${CLUSTER_PREFIX} | cut -c-40) --zone ${GKERegion} --cluster-version $GKE_VERSION --project $GCP_PROJECT --preemptible --machine-type n1-standard-4 --num-nodes=4 --min-nodes=4 --max-nodes=6 --network=jenkins-pg-vpc --subnetwork=jenkins-pg-${CLUSTER_PREFIX} --cluster-ipv4-cidr=/21 --labels delete-cluster-after-hours=6 && \ + kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user=\$(gcloud config get-value core/account) || ret_val=\$? 
+ if [ \${ret_val} -eq 0 ]; then break; fi + ret_num=\$((ret_num + 1)) + done + if [ \${ret_num} -eq 15 ]; then exit 1; fi + """ + } +} +void ShutdownCluster(String CLUSTER_PREFIX) { + if ( "${params.IS_GKE_ALPHA}" == "YES" ) { + ACCOUNT='alpha-svc-acct' + CRED_ID='gcloud-alpha-key-file' + } else { + ACCOUNT='jenkins' + CRED_ID='gcloud-key-file' + } + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: CRED_ID, variable: 'CLIENT_SECRET_FILE')]) { + sh """ + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + export USE_GKE_GCLOUD_AUTH_PLUGIN=True + source $HOME/google-cloud-sdk/path.bash.inc + gcloud auth activate-service-account $ACCOUNT@"$GCP_PROJECT".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud container clusters delete --zone ${GKERegion} \$(echo $CLUSTER_NAME-${CLUSTER_PREFIX} | cut -c-40) + """ + } +} +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch ${FILE_NAME} + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/${env.GIT_SHORT_COMMIT} + aws s3 ls \$S3_PATH/${FILE_NAME} || : + aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : + """ + } +} + +void popArtifactFile(String FILE_NAME) { + echo "Try to get $FILE_NAME file from S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/${env.GIT_SHORT_COMMIT} + aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : + """ + } +} + +testsResultsMap = [:] +testsReportMap = [:] +TestsReport = '<testsuite name=\\"PGO\\">\n' + +void makeReport() { + for ( test in testsReportMap ) { + TestsReport = TestsReport + "<testcase name=\\\"${test.key}\\\"><${test.value}/></testcase>\n" + } + TestsReport = TestsReport + '</testsuite>\n' +} + +void setTestsresults() { + testsResultsMap.each { file -> + pushArtifactFile("${file.key}") + } +} + +void runTest(String TEST_NAME, String CLUSTER_PREFIX) { + def retryCount = 0 + waitUntil { + try { + echo "The $TEST_NAME test was started!" + testsReportMap[TEST_NAME] = 'failure' + PPG_TAG = sh(script: "if [ -n \"\${PGO_POSTGRES_HA_IMAGE}\" ] ; then echo ${PGO_POSTGRES_HA_IMAGE} | awk -F':' '{print \$2}' | grep -oE '[A-Za-z0-9\\.]+-ppg[0-9]{2}' ; else echo 'main-ppg13'; fi", , returnStdout: true).trim() + popArtifactFile("${env.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG") + + timeout(time: 120, unit: 'MINUTES') { + sh """ + if [ -f "${params.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG" ]; then + echo Skip $TEST_NAME test + else + cd ./source + if [ -n "${PG_VERSION}" ]; then + export PG_VER=${PG_VERSION} + fi + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + export IMAGE_OPERATOR=${PGO_OPERATOR_IMAGE} + else + export IMAGE_OPERATOR=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-postgres-operator + fi + + if [ -n "${PGO_APISERVER_IMAGE}" ]; then + export IMAGE_APISERVER=${PGO_APISERVER_IMAGE} + else + export IMAGE_APISERVER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-apiserver + fi + + if [ -n "${PGO_EVENT_IMAGE}" ]; then + export IMAGE_PGOEVENT=${PGO_EVENT_IMAGE} + else + export
IMAGE_PGOEVENT=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-event + fi + + if [ -n "${PGO_RMDATA_IMAGE}" ]; then + export IMAGE_RMDATA=${PGO_RMDATA_IMAGE} + else + export IMAGE_RMDATA=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-rmdata + fi + + if [ -n "${PGO_SCHEDULER_IMAGE}" ]; then + export IMAGE_SCHEDULER=${PGO_SCHEDULER_IMAGE} + else + export IMAGE_SCHEDULER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-scheduler + fi + + if [ -n "${PGO_DEPLOYER_IMAGE}" ]; then + export IMAGE_DEPLOYER=${PGO_DEPLOYER_IMAGE} + else + export IMAGE_DEPLOYER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-deployer + fi + + if [ -n "${PGO_PGBOUNCER_IMAGE}" ]; then + export IMAGE_PGBOUNCER=${PGO_PGBOUNCER_IMAGE} + fi + + if [ -n "${PGO_POSTGRES_HA_IMAGE}" ]; then + export IMAGE_PG_HA=${PGO_POSTGRES_HA_IMAGE} + export PG_VER=\$(echo \${IMAGE_PG_HA} | grep -Eo 'ppg[0-9]+'| sed 's/ppg//g') + fi + + if [ -n "${PGO_BACKREST_IMAGE}" ]; then + export IMAGE_BACKREST=${PGO_BACKREST_IMAGE} + fi + + if [ -n "${PGO_BACKREST_REPO_IMAGE}" ]; then + export IMAGE_BACKREST_REPO=${PGO_BACKREST_REPO_IMAGE} + fi + + if [ -n "${PGO_PGBADGER_IMAGE}" ]; then + export IMAGE_PGBADGER=${PGO_PGBADGER_IMAGE} + fi + + if [ -n "${PMM_SERVER_IMAGE_BASE}" ]; then + export IMAGE_PMM_SERVER_REPO=${PMM_SERVER_IMAGE_BASE} + fi + + if [ -n "${PMM_SERVER_IMAGE_TAG}" ]; then + export IMAGE_PMM_SERVER_TAG=${PMM_SERVER_IMAGE_TAG} + fi + + if [ -n "${PMM_CLIENT_IMAGE}" ]; then + export IMAGE_PMM=${PMM_CLIENT_IMAGE} + fi + + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + source $HOME/google-cloud-sdk/path.bash.inc + ./e2e-tests/$TEST_NAME/run + fi + """ + } + pushArtifactFile("${params.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG") + testsResultsMap["${params.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG"] = 'passed' + testsReportMap[TEST_NAME] = 'passed' + return true + } + catch (exc) { + if 
(retryCount >= 2) { + currentBuild.result = 'FAILURE' + return true + } + retryCount++ + return false + } + } + + echo "The $TEST_NAME test was finished!" +} + +void installRpms() { + sh ''' + sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true + sudo percona-release enable-only tools + sudo yum install -y jq | true + ''' +} +pipeline { + environment { + CLOUDSDK_CORE_DISABLE_PROMPTS = 1 + } + parameters { + string( + defaultValue: '1.x', + description: 'Tag/Branch for percona/percona-postgresql-operator repository', + name: 'GIT_BRANCH') + string( + defaultValue: 'https://github.com/percona/percona-postgresql-operator', + description: 'percona-postgresql-operator repository', + name: 'GIT_REPO') + string( + defaultValue: '1.24', + description: 'GKE version', + name: 'GKE_VERSION') + string( + defaultValue: '12', + description: 'PG version', + name: 'PG_VERSION') + choice( + choices: 'NO\nYES', + description: 'GKE alpha/stable', + name: 'IS_GKE_ALPHA') + string( + defaultValue: '', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', + name: 'PGO_OPERATOR_IMAGE') + string( + defaultValue: '', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', + name: 'PGO_APISERVER_IMAGE') + string( + defaultValue: '', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', + name: 'PGO_EVENT_IMAGE') + string( + defaultValue: '', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', + name: 'PGO_RMDATA_IMAGE') + string( + defaultValue: '', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', + name: 'PGO_SCHEDULER_IMAGE') + string( + defaultValue: '', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', + name: 'PGO_DEPLOYER_IMAGE') + string( + defaultValue: '', + 
description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg12-pgbouncer', + name: 'PGO_PGBOUNCER_IMAGE') + string( + defaultValue: '', + description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg12-postgres-ha', + name: 'PGO_POSTGRES_HA_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg12-pgbackrest', + name: 'PGO_BACKREST_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg12-pgbackrest-repo', + name: 'PGO_BACKREST_REPO_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg12-pgbadger', + name: 'PGO_PGBADGER_IMAGE') + string( + defaultValue: 'perconalab/pmm-server', + description: 'PMM server image base: perconalab/pmm-server', + name: 'PMM_SERVER_IMAGE_BASE') + string( + defaultValue: 'dev-latest', + description: 'PMM server image tag: dev-latest', + name: 'PMM_SERVER_IMAGE_TAG') + string( + defaultValue: 'perconalab/pmm-client:dev-latest', + description: 'PMM server image: perconalab/pmm-client:dev-latest', + name: 'PMM_CLIENT_IMAGE') + } + agent { + label 'docker' + } + options { + skipDefaultCheckout() + disableConcurrentBuilds() + } + + stages { + stage('Prepare') { + steps { + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + sh """ + # sudo is needed for better node recovery after compilation failure + # if building failed on compilation stage directory will have files owned by docker user + sudo sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + ./cloud/local/checkout $GIT_REPO $GIT_BRANCH + """ + stash includes: "source/**", name: "sourceFILES" + + installRpms() + sh ''' + if [ ! 
-d $HOME/google-cloud-sdk/bin ]; then + rm -rf $HOME/google-cloud-sdk + curl https://sdk.cloud.google.com | bash + fi + + source $HOME/google-cloud-sdk/path.bash.inc + gcloud components install alpha + gcloud components install kubectl + + curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - + curl -s -L https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 --wildcards -zxvpf - '*/oc' + + sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/3.3.2/yq_linux_amd64 > /usr/local/bin/yq" + sudo chmod +x /usr/local/bin/yq + ''' + unstash "sourceFILES" + withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh ''' + cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE ./source/e2e-tests/conf/cloud-secret-minio-gw.yml + ''' + } + } + } + stage('Build docker image') { + steps { + unstash "sourceFILES" + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { + sh ''' + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + echo "SKIP: Build is not needed, PG operator image was set!" 
+ else + cd ./source/ + sg docker -c " + docker login -u '${USER}' -p '${PASS}' + export IMAGE_URI_BASE=perconalab/percona-postgresql-operator:$GIT_BRANCH + ./e2e-tests/build + docker logout + " + sudo rm -rf ./build + fi + ''' + } + } + } + stage('Run Tests') { + environment { + CLOUDSDK_CORE_DISABLE_PROMPTS = 1 + CLEAN_NAMESPACE = 1 + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() + VERSION = "${env.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}" + CLUSTER_NAME = sh(script: "echo jkns-ver-pgo-${PG_VERSION}-${GIT_SHORT_COMMIT} | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim() + PGO_K8S_NAME = "${env.CLUSTER_NAME}-upstream" + ECR = "119175775298.dkr.ecr.us-east-1.amazonaws.com" + } + parallel { + stage('E2E Basic tests') { + steps { + CreateCluster('sandbox') + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + runTest('init-deploy', 'sandbox') + } + runTest('scaling', 'sandbox') + runTest('recreate', 'sandbox') + runTest('affinity', 'sandbox') + runTest('monitoring', 'sandbox') + runTest('self-healing', 'sandbox') + runTest('operator-self-healing', 'sandbox') + runTest('clone-cluster', 'sandbox') + runTest('tls-check', 'sandbox') + runTest('users', 'sandbox') + runTest('ns-mode', 'sandbox') + ShutdownCluster('sandbox') + } + } + stage('E2E demand-backup') { + steps { + CreateCluster('demand-backup') + runTest('demand-backup', 'demand-backup') + ShutdownCluster('demand-backup') + } + } + stage('E2E scheduled-backup') { + steps { + CreateCluster('scheduled-backup') + runTest('scheduled-backup', 'scheduled-backup') + ShutdownCluster('scheduled-backup') + } + } + stage('E2E Upgrade') { + steps { + CreateCluster('upgrade') + runTest('upgrade', 'upgrade') + runTest('smart-update', 'upgrade') + ShutdownCluster('upgrade') + } + } + stage('E2E Version-service') { + steps { + 
CreateCluster('version-service') + runTest('version-service', 'version-service') + ShutdownCluster('version-service') + } + } + } + } + } + post { + always { + setTestsresults() + + makeReport() + sh """ + echo "${TestsReport}" > TestsReport.xml + """ + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml' + + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh ''' + export CLUSTER_NAME=$(echo jkns-ver-pgo-${PG_VERSION}-$(git -C source rev-parse --short HEAD) | tr '[:upper:]' '[:lower:]') + source $HOME/google-cloud-sdk/path.bash.inc + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud container clusters list --format='csv[no-heading](name)' --filter $CLUSTER_NAME | xargs gcloud container clusters delete --zone ${GKERegion} --quiet || true + ''' + } + sh ''' + sudo docker rmi -f \$(sudo docker images -q) || true + sudo rm -rf ./* + sudo rm -rf $HOME/google-cloud-sdk + ''' + deleteDir() + } + } +} diff --git a/cloud/jenkins/pgo_v1_pg13_operator_aws_openshift-4.groovy b/cloud/jenkins/pgo_v1_pg13_operator_aws_openshift-4.groovy new file mode 100644 index 0000000000..af40dccac8 --- /dev/null +++ b/cloud/jenkins/pgo_v1_pg13_operator_aws_openshift-4.groovy @@ -0,0 +1,472 @@ +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch ${FILE_NAME} + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 ls \$S3_PATH/${FILE_NAME} || : + aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : + """ + } +} + +void popArtifactFile(String FILE_NAME) { + echo "Try to get $FILE_NAME file from S3!" + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : + """ + } +} + +TestsReport = '<testsuite name=\\"PGO\\">\n' +testsReportMap = [:] +void makeReport() { + for ( test in testsReportMap ) { + TestsReport = TestsReport + "<testcase name=\\\"${test.key}\\\"><${test.value}/></testcase>\n" + } + TestsReport = TestsReport + '</testsuite>\n' +} + +void CreateCluster(String CLUSTER_SUFFIX){ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift4-secrets', variable: 'OPENSHIFT_CONF_FILE')]) { + sh """ + platform_version=`echo "${params.PLATFORM_VER}" | awk -F. '{ printf("%d%03d%03d%03d\\n", \$1,\$2,\$3,\$4); }';` + version=`echo "4.12.0" | awk -F.
'{ printf("%d%03d%03d%03d\\n", \$1,\$2,\$3,\$4); }';` + if [ \$platform_version -ge \$version ];then + POLICY="additionalTrustBundlePolicy: Proxyonly" + NETWORK_TYPE="OVNKubernetes" + else + POLICY="" + NETWORK_TYPE="OpenShiftSDN" + fi + mkdir -p openshift/${CLUSTER_SUFFIX} +cat <<-EOF > ./openshift/${CLUSTER_SUFFIX}/install-config.yaml +\$POLICY +apiVersion: v1 +baseDomain: cd.percona.com +compute: +- architecture: amd64 + hyperthreading: Enabled + name: worker + platform: + aws: + type: m5.2xlarge + replicas: 3 +controlPlane: + architecture: amd64 + hyperthreading: Enabled + name: master + platform: {} + replicas: 1 +metadata: + creationTimestamp: null + name: openshift4-par-pgo-jenkins-${CLUSTER_SUFFIX} +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + machineNetwork: + - cidr: 10.0.0.0/16 + networkType: \$NETWORK_TYPE + serviceNetwork: + - 172.30.0.0/16 +platform: + aws: + region: eu-west-3 + userTags: + iit-billing-tag: openshift + delete-cluster-after-hours: 8 + team: cloud + product: pgo-v1-operator + job: ${env.JOB_NAME} + build: ${env.BUILD_NUMBER} + +publish: External +EOF + cat $OPENSHIFT_CONF_FILE >> ./openshift/${CLUSTER_SUFFIX}/install-config.yaml + """ + + sshagent(['aws-openshift-41-key']) { + sh """ + /usr/local/bin/openshift-install create cluster --dir=./openshift/${CLUSTER_SUFFIX} + export KUBECONFIG=./openshift/${CLUSTER_SUFFIX}/auth/kubeconfig + + """ + } + } +} + +void ShutdownCluster(String CLUSTER_SUFFIX) { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) { + sshagent(['aws-openshift-41-key']) { + sh """ + /usr/local/bin/openshift-install destroy cluster --dir=./openshift/${CLUSTER_SUFFIX} + """ + } + } + +} + +void runTest(String TEST_NAME, String CLUSTER_SUFFIX) { + 
def retryCount = 0 + waitUntil { + try { + echo "The $TEST_NAME test was started!" + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() + testsReportMap[TEST_NAME] = 'failure' + PPG_TAG = sh(script: "if [ -n \"\${PGO_POSTGRES_HA_IMAGE}\" ] ; then echo ${PGO_POSTGRES_HA_IMAGE} | awk -F':' '{print \$2}' | grep -oE '[A-Za-z0-9\\.]+-ppg[0-9]{2}' ; else echo 'main-ppg13'; fi", , returnStdout: true).trim() + + popArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + + sh """ + if [ -f "${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG" ]; then + echo Skip $TEST_NAME test + else + cd ./source + if [ -n "${PG_VERSION}" ]; then + export PG_VER=${PG_VERSION} + fi + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + export IMAGE_OPERATOR=${PGO_OPERATOR_IMAGE} + else + export IMAGE_OPERATOR=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-postgres-operator + fi + + if [ -n "${PGO_APISERVER_IMAGE}" ]; then + export IMAGE_APISERVER=${PGO_APISERVER_IMAGE} + else + export IMAGE_APISERVER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-apiserver + fi + + if [ -n "${PGO_EVENT_IMAGE}" ]; then + export IMAGE_PGOEVENT=${PGO_EVENT_IMAGE} + else + export IMAGE_PGOEVENT=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-event + fi + + if [ -n "${PGO_RMDATA_IMAGE}" ]; then + export IMAGE_RMDATA=${PGO_RMDATA_IMAGE} + else + export IMAGE_RMDATA=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-rmdata + fi + + if [ -n "${PGO_SCHEDULER_IMAGE}" ]; then + export IMAGE_SCHEDULER=${PGO_SCHEDULER_IMAGE} + else + export IMAGE_SCHEDULER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-scheduler + fi + + if [ -n "${PGO_DEPLOYER_IMAGE}" ]; then + export IMAGE_DEPLOYER=${PGO_DEPLOYER_IMAGE} + else + export IMAGE_DEPLOYER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-deployer + fi + + if [ -n "${PGO_PGBOUNCER_IMAGE}" ]; then + export 
IMAGE_PGBOUNCER=${PGO_PGBOUNCER_IMAGE} + fi + + if [ -n "${PGO_POSTGRES_HA_IMAGE}" ]; then + export IMAGE_PG_HA=${PGO_POSTGRES_HA_IMAGE} + export PG_VER=\$(echo \${IMAGE_PG_HA} | grep -Eo 'ppg[0-9]+'| sed 's/ppg//g') + fi + + if [ -n "${PGO_BACKREST_IMAGE}" ]; then + export IMAGE_BACKREST=${PGO_BACKREST_IMAGE} + fi + + if [ -n "${PGO_BACKREST_REPO_IMAGE}" ]; then + export IMAGE_BACKREST_REPO=${PGO_BACKREST_REPO_IMAGE} + fi + + if [ -n "${PGO_PGBADGER_IMAGE}" ]; then + export IMAGE_PGBADGER=${PGO_PGBADGER_IMAGE} + fi + + if [ -n "${PMM_SERVER_IMAGE_BASE}" ]; then + export IMAGE_PMM_SERVER_REPO=${PMM_SERVER_IMAGE_BASE} + fi + + if [ -n "${PMM_SERVER_IMAGE_TAG}" ]; then + export IMAGE_PMM_SERVER_TAG=${PMM_SERVER_IMAGE_TAG} + fi + + if [ -n "${PMM_CLIENT_IMAGE}" ]; then + export IMAGE_PMM=${PMM_CLIENT_IMAGE} + fi + + source $HOME/google-cloud-sdk/path.bash.inc + export KUBECONFIG=$WORKSPACE/openshift/${CLUSTER_SUFFIX}/auth/kubeconfig + oc whoami + + ./e2e-tests/$TEST_NAME/run + fi + """ + pushArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + testsReportMap[TEST_NAME] = 'passed' + return true + } + catch (exc) { + if (retryCount >= 2) { + currentBuild.result = 'FAILURE' + return true + } + retryCount++ + return false + } + } + + echo "The $TEST_NAME test was finished!" 
+} +void installRpms() { + sh """ + sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true + sudo percona-release enable-only tools + sudo yum install -y percona-xtrabackup-80 jq | true + """ +} +pipeline { + parameters { + string( + defaultValue: '4.10.54', + description: 'OpenShift version to use', + name: 'OS_VERSION') + string( + defaultValue: '1.x', + description: 'Tag/Branch for percona/percona-postgresql-operator repository', + name: 'GIT_BRANCH') + string( + defaultValue: 'https://github.com/percona/percona-postgresql-operator', + description: 'percona-postgresql-operator repository', + name: 'GIT_REPO') + string( + defaultValue: '13', + description: 'PG version', + name: 'PG_VERSION') + string( + defaultValue: '', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', + name: 'PGO_OPERATOR_IMAGE') + string( + defaultValue: '', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', + name: 'PGO_APISERVER_IMAGE') + string( + defaultValue: '', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', + name: 'PGO_EVENT_IMAGE') + string( + defaultValue: '', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', + name: 'PGO_RMDATA_IMAGE') + string( + defaultValue: '', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', + name: 'PGO_SCHEDULER_IMAGE') + string( + defaultValue: '', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', + name: 'PGO_DEPLOYER_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg13-pgbouncer', + name: 'PGO_PGBOUNCER_IMAGE') + string( + defaultValue: '', + description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg13-postgres-ha', + 
name: 'PGO_POSTGRES_HA_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest', + name: 'PGO_BACKREST_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest-repo', + name: 'PGO_BACKREST_REPO_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg13-pgbadger', + name: 'PGO_PGBADGER_IMAGE') + string( + defaultValue: 'perconalab/pmm-server', + description: 'PMM server image base: perconalab/pmm-server', + name: 'PMM_SERVER_IMAGE_BASE') + string( + defaultValue: 'dev-latest', + description: 'PMM server image tag: dev-latest', + name: 'PMM_SERVER_IMAGE_TAG') + string( + defaultValue: 'perconalab/pmm-client:dev-latest', + description: 'PMM server image: perconalab/pmm-client:dev-latest', + name: 'PMM_CLIENT_IMAGE') + } + environment { + TF_IN_AUTOMATION = 'true' + CLEAN_NAMESPACE = 1 + } + agent { + label 'docker' + } + options { + buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '10', artifactNumToKeepStr: '10')) + skipDefaultCheckout() + disableConcurrentBuilds() + } + + stages { + stage('Prepare') { + steps { + sh """ + wget https://releases.hashicorp.com/terraform/0.11.14/terraform_0.11.14_linux_amd64.zip + unzip terraform_0.11.14_linux_amd64.zip + sudo mv terraform /usr/local/bin/ && rm terraform_0.11.14_linux_amd64.zip + """ + installRpms() + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh ''' + if [ ! 
-d $HOME/google-cloud-sdk/bin ]; then + rm -rf $HOME/google-cloud-sdk + curl https://sdk.cloud.google.com | bash + fi + + source $HOME/google-cloud-sdk/path.bash.inc + gcloud components update kubectl + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud version + + curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - + + curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OS_VERSION/openshift-client-linux-$OS_VERSION.tar.gz \ + | sudo tar -C /usr/local/bin --wildcards -zxvpf - + curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OS_VERSION/openshift-install-linux-$OS_VERSION.tar.gz \ + | sudo tar -C /usr/local/bin --wildcards -zxvpf - + + sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/3.3.2/yq_linux_amd64 > /usr/local/bin/yq" + sudo chmod +x /usr/local/bin/yq + ''' + } + + } + } + stage('Build docker image') { + steps { + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER'), file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'),file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh ''' + sudo sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + ./cloud/local/checkout $GIT_REPO $GIT_BRANCH + + cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE ./source/e2e-tests/conf/cloud-secret-minio-gw.yml + + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + echo "SKIP: Build is not needed, PG operator image was set!" 
+ else + cd ./source/ + sg docker -c " + docker login -u '${USER}' -p '${PASS}' + export IMAGE_URI_BASE=perconalab/percona-postgresql-operator:$GIT_BRANCH + ./e2e-tests/build + docker logout + " + sudo rm -rf ./build + fi + ''' + } + } + } + stage('Run tests') { + parallel { + stage('E2E Basic tests') { + steps { + CreateCluster("${params.PG_VERSION}-sandbox") + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + runTest('init-deploy', "${params.PG_VERSION}-sandbox") + } + runTest('scaling', "${params.PG_VERSION}-sandbox") + runTest('recreate', "${params.PG_VERSION}-sandbox") + runTest('affinity', "${params.PG_VERSION}-sandbox") + runTest('monitoring', "${params.PG_VERSION}-sandbox") + runTest('self-healing', "${params.PG_VERSION}-sandbox") + runTest('operator-self-healing', "${params.PG_VERSION}-sandbox") + runTest('clone-cluster', "${params.PG_VERSION}-sandbox") + runTest('tls-check', "${params.PG_VERSION}-sandbox") + runTest('users', "${params.PG_VERSION}-sandbox") + runTest('ns-mode', "${params.PG_VERSION}-sandbox") + ShutdownCluster("${params.PG_VERSION}-sandbox") + } + } + stage('E2E demand-backup') { + steps { + CreateCluster("${params.PG_VERSION}-demand-backup") + runTest('demand-backup', "${params.PG_VERSION}-demand-backup") + ShutdownCluster("${params.PG_VERSION}-demand-backup") + } + } + stage('E2E scheduled-backup') { + steps { + CreateCluster("${params.PG_VERSION}-scheduled-backup") + runTest('scheduled-backup', "${params.PG_VERSION}-scheduled-backup") + ShutdownCluster("${params.PG_VERSION}-scheduled-backup") + } + } + stage('E2E Upgrade') { + steps { + CreateCluster("${params.PG_VERSION}-upgrade") + runTest('upgrade', "${params.PG_VERSION}-upgrade") + runTest('smart-update', "${params.PG_VERSION}-upgrade") + ShutdownCluster("${params.PG_VERSION}-upgrade") + } + } + stage('E2E Version-service') { + steps { + CreateCluster("${params.PG_VERSION}-version-service") + runTest('version-service', "${params.PG_VERSION}-version-service") + ShutdownCluster("${params.PG_VERSION}-version-service") + } + } + } + } + stage('Make report') { + steps { + makeReport() + 
sh """ + echo "${TestsReport}" > TestsReport.xml + """ + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml' + } + } + } + + post { + always { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) { + sshagent(['aws-openshift-41-key']) { + sh """ + for cluster_suffix in '${params.PG_VERSION}-sandbox' '${params.PG_VERSION}-demand-backup' '${params.PG_VERSION}-scheduled-backup' '${params.PG_VERSION}-upgrade' '${params.PG_VERSION}-version-service' + do + /usr/local/bin/openshift-install destroy cluster --dir=./openshift/\$cluster_suffix > /dev/null 2>&1 || true + done + """ + } + } + + sh ''' + sudo docker rmi -f \$(sudo docker images -q) || true + sudo rm -rf $HOME/google-cloud-sdk + sudo rm -rf ./* + ''' + deleteDir() + } + } +} diff --git a/cloud/jenkins/pgo_v1_pg13_operator_eks.groovy b/cloud/jenkins/pgo_v1_pg13_operator_eks.groovy new file mode 100644 index 0000000000..4370ff1aa5 --- /dev/null +++ b/cloud/jenkins/pgo_v1_pg13_operator_eks.groovy @@ -0,0 +1,400 @@ +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch ${FILE_NAME} + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 ls \$S3_PATH/${FILE_NAME} || : + aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : + """ + } +} + +void popArtifactFile(String FILE_NAME) { + echo "Try to get $FILE_NAME file from S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source describe --always --dirty) + aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : + """ + } +} + +TestsReport = '\n' +testsReportMap = [:] +void makeReport() { + for ( test in testsReportMap ) { + TestsReport = TestsReport + "<${test.value}/>\n" + } + TestsReport = TestsReport + '\n' +} + +void runTest(String TEST_NAME) { + def retryCount = 0 + waitUntil { + try { + echo "The $TEST_NAME test was started!" + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() + testsReportMap[TEST_NAME] = 'failure' + PPG_TAG = sh(script: "if [ -n \"\${PGO_POSTGRES_HA_IMAGE}\" ] ; then echo ${PGO_POSTGRES_HA_IMAGE} | awk -F':' '{print \$2}' | grep -oE '[A-Za-z0-9\\.]+-ppg[0-9]{2}' ; else echo 'main-ppg13'; fi", , returnStdout: true).trim() + + popArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd'], file(credentialsId: 'eks-conf-file', variable: 'EKS_CONF_FILE')]) { + sh """ + if [ -f "${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG" ]; then + echo Skip $TEST_NAME test + else + cd ./source + if [ -n "${PG_VERSION}" ]; then + export PG_VER=${PG_VERSION} + fi + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + export IMAGE_OPERATOR=${PGO_OPERATOR_IMAGE} + else + export IMAGE_OPERATOR=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-postgres-operator + fi + + if [ -n "${PGO_APISERVER_IMAGE}" ]; then + export IMAGE_APISERVER=${PGO_APISERVER_IMAGE} + else + export IMAGE_APISERVER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-apiserver + fi + + if [ -n "${PGO_EVENT_IMAGE}" ]; then + export 
IMAGE_PGOEVENT=${PGO_EVENT_IMAGE} + else + export IMAGE_PGOEVENT=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-event + fi + + if [ -n "${PGO_RMDATA_IMAGE}" ]; then + export IMAGE_RMDATA=${PGO_RMDATA_IMAGE} + else + export IMAGE_RMDATA=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-rmdata + fi + + if [ -n "${PGO_SCHEDULER_IMAGE}" ]; then + export IMAGE_SCHEDULER=${PGO_SCHEDULER_IMAGE} + else + export IMAGE_SCHEDULER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-scheduler + fi + + if [ -n "${PGO_DEPLOYER_IMAGE}" ]; then + export IMAGE_DEPLOYER=${PGO_DEPLOYER_IMAGE} + else + export IMAGE_DEPLOYER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-deployer + fi + + if [ -n "${PGO_PGBOUNCER_IMAGE}" ]; then + export IMAGE_PGBOUNCER=${PGO_PGBOUNCER_IMAGE} + fi + + if [ -n "${PGO_POSTGRES_HA_IMAGE}" ]; then + export IMAGE_PG_HA=${PGO_POSTGRES_HA_IMAGE} + export PG_VER=\$(echo \${IMAGE_PG_HA} | grep -Eo 'ppg[0-9]+'| sed 's/ppg//g') + fi + + if [ -n "${PGO_BACKREST_IMAGE}" ]; then + export IMAGE_BACKREST=${PGO_BACKREST_IMAGE} + fi + + if [ -n "${PGO_BACKREST_REPO_IMAGE}" ]; then + export IMAGE_BACKREST_REPO=${PGO_BACKREST_REPO_IMAGE} + fi + + if [ -n "${PGO_PGBADGER_IMAGE}" ]; then + export IMAGE_PGBADGER=${PGO_PGBADGER_IMAGE} + fi + + if [ -n "${PMM_SERVER_IMAGE_BASE}" ]; then + export IMAGE_PMM_SERVER_REPO=${PMM_SERVER_IMAGE_BASE} + fi + + if [ -n "${PMM_SERVER_IMAGE_TAG}" ]; then + export IMAGE_PMM_SERVER_TAG=${PMM_SERVER_IMAGE_TAG} + fi + + if [ -n "${PMM_CLIENT_IMAGE}" ]; then + export IMAGE_PMM=${PMM_CLIENT_IMAGE} + fi + + export PATH=/home/ec2-user/.local/bin:$PATH + source $HOME/google-cloud-sdk/path.bash.inc + export KUBECONFIG=~/.kube/config + + ./e2e-tests/$TEST_NAME/run + fi + """ + } + pushArtifactFile("${params.GIT_BRANCH}-$GIT_SHORT_COMMIT-$TEST_NAME-$PPG_TAG") + testsReportMap[TEST_NAME] = 'passed' + return true + } + catch (exc) { + if (retryCount >= 2) { + currentBuild.result = 'FAILURE' + return true + 
} + retryCount++ + return false + } + } + + echo "The $TEST_NAME test was finished!" +} +void installRpms() { + sh """ + sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true + sudo percona-release enable-only tools + sudo yum install -y jq | true + """ +} +pipeline { + parameters { + string( + defaultValue: '1.21', + description: 'Kubernetes target version', + name: 'KUBEVERSION') + string( + defaultValue: '1.x', + description: 'Tag/Branch for percona/percona-postgresql-operator repository', + name: 'GIT_BRANCH') + string( + defaultValue: 'https://github.com/percona/percona-postgresql-operator', + description: 'percona-postgresql-operator repository', + name: 'GIT_REPO') + string( + defaultValue: '13', + description: 'PG version', + name: 'PG_VERSION') + string( + defaultValue: '', + description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator', + name: 'PGO_OPERATOR_IMAGE') + string( + defaultValue: '', + description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver', + name: 'PGO_APISERVER_IMAGE') + string( + defaultValue: '', + description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event', + name: 'PGO_EVENT_IMAGE') + string( + defaultValue: '', + description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata', + name: 'PGO_RMDATA_IMAGE') + string( + defaultValue: '', + description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler', + name: 'PGO_SCHEDULER_IMAGE') + string( + defaultValue: '', + description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer', + name: 'PGO_DEPLOYER_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg13-pgbouncer', + name: 'PGO_PGBOUNCER_IMAGE') + string( + defaultValue: '', + description: 'Operators postgres image: 
perconalab/percona-postgresql-operator:main-ppg13-postgres-ha', + name: 'PGO_POSTGRES_HA_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest', + name: 'PGO_BACKREST_IMAGE') + string( + defaultValue: '', + description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest-repo', + name: 'PGO_BACKREST_REPO_IMAGE') + string( + defaultValue: '', + description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg13-pgbadger', + name: 'PGO_PGBADGER_IMAGE') + string( + defaultValue: 'perconalab/pmm-server', + description: 'PMM server image base: perconalab/pmm-server', + name: 'PMM_SERVER_IMAGE_BASE') + string( + defaultValue: 'dev-latest', + description: 'PMM server image tag: dev-latest', + name: 'PMM_SERVER_IMAGE_TAG') + string( + defaultValue: 'perconalab/pmm-client:dev-latest', + description: 'PMM server image: perconalab/pmm-client:dev-latest', + name: 'PMM_CLIENT_IMAGE') + } + agent { + label 'docker' + } + options { + buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '10', artifactNumToKeepStr: '10')) + skipDefaultCheckout() + disableConcurrentBuilds() + } + + stages { + stage('Prepare') { + steps { + installRpms() + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh ''' + if [ ! 
-d $HOME/google-cloud-sdk/bin ]; then + rm -rf $HOME/google-cloud-sdk + curl https://sdk.cloud.google.com | bash + fi + + source $HOME/google-cloud-sdk/path.bash.inc + gcloud components update kubectl + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud version + + curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - + + curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp + sudo mv -v /tmp/eksctl /usr/local/bin + + sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/3.3.2/yq_linux_amd64 > /usr/local/bin/yq" + sudo chmod +x /usr/local/bin/yq + ''' + } + } + } + stage('Build docker image') { + steps { + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER'), file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh ''' + sudo sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + ./cloud/local/checkout $GIT_REPO $GIT_BRANCH + + cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE ./source/e2e-tests/conf/cloud-secret-minio-gw.yml + + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + echo "SKIP: Build is not needed, PGO operator image was set!" 
+ else + cd ./source/ + sg docker -c " + docker login -u '${USER}' -p '${PASS}' + export IMAGE_URI_BASE=perconalab/percona-postgresql-operator:$GIT_BRANCH + ./e2e-tests/build + docker logout + " + sudo rm -rf ./build + fi + ''' + } + } + } + stage('Create EKS Infrastructure') { + steps { + sh ''' +cat <<-EOF > cluster.yaml +# An example of ClusterConfig showing nodegroups with mixed instances (spot and on demand): +--- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-pgo-pg$PG_VERSION-cluster + region: eu-west-3 + version: '$KUBEVERSION' + +iam: + withOIDC: true + +addons: +- name: aws-ebs-csi-driver + wellKnownPolicies: + ebsCSIController: true + +nodeGroups: + - name: ng-1 + minSize: 3 + maxSize: 5 + instancesDistribution: + maxPrice: 0.15 + instanceTypes: ["m5.xlarge", "m5.2xlarge"] # At least two instance types should be specified + onDemandBaseCapacity: 0 + onDemandPercentageAboveBaseCapacity: 50 + spotInstancePools: 2 + tags: + 'iit-billing-tag': 'jenkins-eks' + team: cloud + product: pgo-v1-operator + job: $JOB_NAME + build: '$BUILD_NUMBER' +EOF + ''' + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + export PATH=/home/ec2-user/.local/bin:$PATH + source $HOME/google-cloud-sdk/path.bash.inc + + eksctl create cluster -f cluster.yaml + """ + } + stash includes: 'cluster.yaml', name: 'cluster_conf' + } + } + stage('Run Tests') { + environment { + CLEAN_NAMESPACE = 1 + } + steps { + runTest('init-deploy') + runTest('scaling') + runTest('recreate') + runTest('affinity') + runTest('monitoring') + // runTest('self-healing') + // runTest('operator-self-healing') + runTest('demand-backup') + runTest('scheduled-backup') + runTest('upgrade') + runTest('smart-update') + runTest('version-service') + runTest('users') + runTest('ns-mode') + } + } + stage('Make report') { + steps { + makeReport() + sh """ 
+ echo "${TestsReport}" > TestsReport.xml + """ + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml' + } + } + } + + post { + always { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + unstash 'cluster_conf' + sh ''' + eksctl delete addon --name aws-ebs-csi-driver --cluster eks-pgo-pg$PG_VERSION-cluster --region eu-west-3 + eksctl delete cluster -f cluster.yaml --wait --force + ''' + } + + sh ''' + sudo docker rmi -f \$(sudo docker images -q) || true + sudo rm -rf $HOME/google-cloud-sdk + sudo rm -rf ./* + ''' + deleteDir() + } + } +} diff --git a/cloud/jenkins/pgo_v1_pg13_operator_gke_version.groovy b/cloud/jenkins/pgo_v1_pg13_operator_gke_version.groovy new file mode 100644 index 0000000000..32927b31e7 --- /dev/null +++ b/cloud/jenkins/pgo_v1_pg13_operator_gke_version.groovy @@ -0,0 +1,472 @@ +GKERegion='us-central1-c' + +void CreateCluster(String CLUSTER_PREFIX) { + if ( "${params.IS_GKE_ALPHA}" == "YES" ) { + runGKEclusterAlpha(CLUSTER_PREFIX) + } else { + runGKEcluster(CLUSTER_PREFIX) + } +} +void runGKEcluster(String CLUSTER_PREFIX) { + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh """ + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + export USE_GKE_GCLOUD_AUTH_PLUGIN=True + source $HOME/google-cloud-sdk/path.bash.inc + ret_num=0 + while [ \${ret_num} -lt 15 ]; do + ret_val=0 + gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE && \ + gcloud config set project $GCP_PROJECT && \ + gcloud container clusters create --zone ${GKERegion} \$(echo $CLUSTER_NAME-${CLUSTER_PREFIX} | cut -c-40) --cluster-version $GKE_VERSION --machine-type n1-standard-4 --preemptible --num-nodes=3 --network=jenkins-pg-vpc 
--subnetwork=jenkins-pg-${CLUSTER_PREFIX} --no-enable-autoupgrade --cluster-ipv4-cidr=/21 --labels delete-cluster-after-hours=6 && \ + kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user jenkins@"$GCP_PROJECT".iam.gserviceaccount.com || ret_val=\$? + if [ \${ret_val} -eq 0 ]; then break; fi + ret_num=\$((ret_num + 1)) + done + if [ \${ret_num} -eq 15 ]; then exit 1; fi + """ + } +} +void runGKEclusterAlpha(String CLUSTER_PREFIX) { + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh """ + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + export USE_GKE_GCLOUD_AUTH_PLUGIN=True + source $HOME/google-cloud-sdk/path.bash.inc + ret_num=0 + while [ \${ret_num} -lt 15 ]; do + ret_val=0 + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE && \ + gcloud config set project $GCP_PROJECT && \ + gcloud alpha container clusters create --release-channel rapid \$(echo $CLUSTER_NAME-${CLUSTER_PREFIX} | cut -c-40) --zone ${GKERegion} --cluster-version $GKE_VERSION --project $GCP_PROJECT --preemptible --machine-type n1-standard-4 --num-nodes=4 --min-nodes=4 --max-nodes=6 --network=jenkins-pg-vpc --subnetwork=jenkins-pg-${CLUSTER_PREFIX} --cluster-ipv4-cidr=/21 --labels delete-cluster-after-hours=6 && \ + kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user=\$(gcloud config get-value core/account) || ret_val=\$? 
+ if [ \${ret_val} -eq 0 ]; then break; fi + ret_num=\$((ret_num + 1)) + done + if [ \${ret_num} -eq 15 ]; then exit 1; fi + """ + } +} +void ShutdownCluster(String CLUSTER_PREFIX) { + if ( "${params.IS_GKE_ALPHA}" == "YES" ) { + ACCOUNT='alpha-svc-acct' + CRED_ID='gcloud-alpha-key-file' + } else { + ACCOUNT='jenkins' + CRED_ID='gcloud-key-file' + } + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: CRED_ID, variable: 'CLIENT_SECRET_FILE')]) { + sh """ + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + export USE_GKE_GCLOUD_AUTH_PLUGIN=True + source $HOME/google-cloud-sdk/path.bash.inc + gcloud auth activate-service-account $ACCOUNT@"$GCP_PROJECT".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud container clusters delete --zone ${GKERegion} \$(echo $CLUSTER_NAME-${CLUSTER_PREFIX} | cut -c-40) + """ + } +} +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch ${FILE_NAME} + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/${env.GIT_SHORT_COMMIT} + aws s3 ls \$S3_PATH/${FILE_NAME} || : + aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : + """ + } +} + +void popArtifactFile(String FILE_NAME) { + echo "Try to get $FILE_NAME file from S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/${env.GIT_SHORT_COMMIT} + aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : + """ + } +} + +testsResultsMap = [:] +testsReportMap = [:] +TestsReport = '\n' + +void makeReport() { + for ( test in testsReportMap ) { + TestsReport = TestsReport + "<${test.value}/>\n" + } + TestsReport = TestsReport + '\n' +} + +void setTestsresults() { + testsResultsMap.each { file -> + pushArtifactFile("${file.key}") + } +} + +void runTest(String TEST_NAME, String CLUSTER_PREFIX) { + def retryCount = 0 + waitUntil { + try { + echo "The $TEST_NAME test was started!" + testsReportMap[TEST_NAME] = 'failure' + PPG_TAG = sh(script: "if [ -n \"\${PGO_POSTGRES_HA_IMAGE}\" ] ; then echo ${PGO_POSTGRES_HA_IMAGE} | awk -F':' '{print \$2}' | grep -oE '[A-Za-z0-9\\.]+-ppg[0-9]{2}' ; else echo 'main-ppg13'; fi", , returnStdout: true).trim() + popArtifactFile("${env.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG") + + timeout(time: 120, unit: 'MINUTES') { + sh """ + if [ -f "${params.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG" ]; then + echo Skip $TEST_NAME test + else + cd ./source + if [ -n "${PG_VERSION}" ]; then + export PG_VER=${PG_VERSION} + fi + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + export IMAGE_OPERATOR=${PGO_OPERATOR_IMAGE} + else + export IMAGE_OPERATOR=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-postgres-operator + fi + + if [ -n "${PGO_APISERVER_IMAGE}" ]; then + export IMAGE_APISERVER=${PGO_APISERVER_IMAGE} + else + export IMAGE_APISERVER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-apiserver + fi + + if [ -n "${PGO_EVENT_IMAGE}" ]; then + export IMAGE_PGOEVENT=${PGO_EVENT_IMAGE} + else + export 
IMAGE_PGOEVENT=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-event + fi + + if [ -n "${PGO_RMDATA_IMAGE}" ]; then + export IMAGE_RMDATA=${PGO_RMDATA_IMAGE} + else + export IMAGE_RMDATA=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-rmdata + fi + + if [ -n "${PGO_SCHEDULER_IMAGE}" ]; then + export IMAGE_SCHEDULER=${PGO_SCHEDULER_IMAGE} + else + export IMAGE_SCHEDULER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-scheduler + fi + + if [ -n "${PGO_DEPLOYER_IMAGE}" ]; then + export IMAGE_DEPLOYER=${PGO_DEPLOYER_IMAGE} + else + export IMAGE_DEPLOYER=perconalab/percona-postgresql-operator:${env.GIT_BRANCH}-pgo-deployer + fi + + if [ -n "${PGO_PGBOUNCER_IMAGE}" ]; then + export IMAGE_PGBOUNCER=${PGO_PGBOUNCER_IMAGE} + fi + + if [ -n "${PGO_POSTGRES_HA_IMAGE}" ]; then + export IMAGE_PG_HA=${PGO_POSTGRES_HA_IMAGE} + export PG_VER=\$(echo \${IMAGE_PG_HA} | grep -Eo 'ppg[0-9]+'| sed 's/ppg//g') + fi + + if [ -n "${PGO_BACKREST_IMAGE}" ]; then + export IMAGE_BACKREST=${PGO_BACKREST_IMAGE} + fi + + if [ -n "${PGO_BACKREST_REPO_IMAGE}" ]; then + export IMAGE_BACKREST_REPO=${PGO_BACKREST_REPO_IMAGE} + fi + + if [ -n "${PGO_PGBADGER_IMAGE}" ]; then + export IMAGE_PGBADGER=${PGO_PGBADGER_IMAGE} + fi + + if [ -n "${PMM_SERVER_IMAGE_BASE}" ]; then + export IMAGE_PMM_SERVER_REPO=${PMM_SERVER_IMAGE_BASE} + fi + + if [ -n "${PMM_SERVER_IMAGE_TAG}" ]; then + export IMAGE_PMM_SERVER_TAG=${PMM_SERVER_IMAGE_TAG} + fi + + if [ -n "${PMM_CLIENT_IMAGE}" ]; then + export IMAGE_PMM=${PMM_CLIENT_IMAGE} + fi + + export KUBECONFIG=/tmp/$CLUSTER_NAME-${CLUSTER_PREFIX} + source $HOME/google-cloud-sdk/path.bash.inc + ./e2e-tests/$TEST_NAME/run + fi + """ + } + pushArtifactFile("${params.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG") + testsResultsMap["${params.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}-$TEST_NAME-${params.GKE_VERSION}-$PPG_TAG"] = 'passed' + testsReportMap[TEST_NAME] = 'passed' + return true + } + catch (exc) { + if 
(retryCount >= 2) {
+ currentBuild.result = 'FAILURE'
+ return true
+ }
+ retryCount++
+ return false
+ }
+ }
+
+ echo "The $TEST_NAME test was finished!"
+}
+
+void installRpms() {
+ sh '''
+ sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true
+ sudo percona-release enable-only tools
+ sudo yum install -y jq || true
+ '''
+}
+pipeline {
+ environment {
+ CLOUDSDK_CORE_DISABLE_PROMPTS = 1
+ }
+ parameters {
+ string(
+ defaultValue: '1.x',
+ description: 'Tag/Branch for percona/percona-postgresql-operator repository',
+ name: 'GIT_BRANCH')
+ string(
+ defaultValue: 'https://github.com/percona/percona-postgresql-operator',
+ description: 'percona-postgresql-operator repository',
+ name: 'GIT_REPO')
+ string(
+ defaultValue: '1.24',
+ description: 'GKE version',
+ name: 'GKE_VERSION')
+ string(
+ defaultValue: '13',
+ description: 'PG version',
+ name: 'PG_VERSION')
+ choice(
+ choices: 'NO\nYES',
+ description: 'GKE alpha/stable',
+ name: 'IS_GKE_ALPHA')
+ string(
+ defaultValue: '',
+ description: 'Operator image: perconalab/percona-postgresql-operator:1-x-postgres-operator',
+ name: 'PGO_OPERATOR_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators API server image: perconalab/percona-postgresql-operator:1-x-pgo-apiserver',
+ name: 'PGO_APISERVER_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators event server image: perconalab/percona-postgresql-operator:1-x-pgo-event',
+ name: 'PGO_EVENT_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators rmdata image: perconalab/percona-postgresql-operator:1-x-pgo-rmdata',
+ name: 'PGO_RMDATA_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators scheduler image: perconalab/percona-postgresql-operator:1-x-pgo-scheduler',
+ name: 'PGO_SCHEDULER_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators deployer image: perconalab/percona-postgresql-operator:1-x-pgo-deployer',
+ name: 'PGO_DEPLOYER_IMAGE')
+ string(
+ defaultValue: '',
+
description: 'Operators pgBouncer image: perconalab/percona-postgresql-operator:main-ppg13-pgbouncer',
+ name: 'PGO_PGBOUNCER_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators postgres image: perconalab/percona-postgresql-operator:main-ppg13-postgres-ha',
+ name: 'PGO_POSTGRES_HA_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest',
+ name: 'PGO_BACKREST_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators backrest utility image: perconalab/percona-postgresql-operator:main-ppg13-pgbackrest-repo',
+ name: 'PGO_BACKREST_REPO_IMAGE')
+ string(
+ defaultValue: '',
+ description: 'Operators pgBadger image: perconalab/percona-postgresql-operator:main-ppg13-pgbadger',
+ name: 'PGO_PGBADGER_IMAGE')
+ string(
+ defaultValue: 'perconalab/pmm-server',
+ description: 'PMM server image base: perconalab/pmm-server',
+ name: 'PMM_SERVER_IMAGE_BASE')
+ string(
+ defaultValue: 'dev-latest',
+ description: 'PMM server image tag: dev-latest',
+ name: 'PMM_SERVER_IMAGE_TAG')
+ string(
+ defaultValue: 'perconalab/pmm-client:dev-latest',
+ description: 'PMM server image: perconalab/pmm-client:dev-latest',
+ name: 'PMM_CLIENT_IMAGE')
+ }
+ agent {
+ label 'docker'
+ }
+ options {
+ skipDefaultCheckout()
+ disableConcurrentBuilds()
+ }
+
+ stages {
+ stage('Prepare') {
+ steps {
+ git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
+ sh """
+ # sudo is needed for better node recovery after compilation failure
+ # if building failed on compilation stage directory will have files owned by docker user
+ sudo git config --global --add safe.directory '*'
+ sudo git reset --hard
+ sudo git clean -xdf
+ sudo rm -rf source
+ ./cloud/local/checkout $GIT_REPO $GIT_BRANCH
+ """
+ stash includes: "source/**", name: "sourceFILES"
+
+ installRpms()
+ sh '''
+ if [ !
-d $HOME/google-cloud-sdk/bin ]; then + rm -rf $HOME/google-cloud-sdk + curl https://sdk.cloud.google.com | bash + fi + + source $HOME/google-cloud-sdk/path.bash.inc + gcloud components install alpha + gcloud components install kubectl + + curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - + curl -s -L https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz \ + | sudo tar -C /usr/local/bin --strip-components 1 --wildcards -zxvpf - '*/oc' + + sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/3.3.2/yq_linux_amd64 > /usr/local/bin/yq" + sudo chmod +x /usr/local/bin/yq + ''' + unstash "sourceFILES" + withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh ''' + cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE ./source/e2e-tests/conf/cloud-secret-minio-gw.yml + ''' + } + } + } + stage('Build docker image') { + steps { + unstash "sourceFILES" + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { + sh ''' + if [ -n "${PGO_OPERATOR_IMAGE}" ]; then + echo "SKIP: Build is not needed, PG operator image was set!" 
+ else
+ cd ./source/
+ sg docker -c "
+ docker login -u '${USER}' -p '${PASS}'
+ export IMAGE_URI_BASE=perconalab/percona-postgresql-operator:$GIT_BRANCH
+ ./e2e-tests/build
+ docker logout
+ "
+ sudo rm -rf ./build
+ fi
+ '''
+ }
+ }
+ }
+ stage('Run Tests') {
+ environment {
+ CLOUDSDK_CORE_DISABLE_PROMPTS = 1
+ CLEAN_NAMESPACE = 1
+ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim()
+ VERSION = "${env.GIT_BRANCH}-${env.GIT_SHORT_COMMIT}"
+ CLUSTER_NAME = sh(script: "echo jkns-ver-pgo-${PG_VERSION}-${GIT_SHORT_COMMIT} | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim()
+ PGO_K8S_NAME = "${env.CLUSTER_NAME}-upstream"
+ ECR = "119175775298.dkr.ecr.us-east-1.amazonaws.com"
+ }
+ parallel {
+ stage('E2E Basic tests') {
+ steps {
+ CreateCluster('sandbox')
+ withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
+ runTest('init-deploy', 'sandbox')
+ }
+ runTest('scaling', 'sandbox')
+ runTest('recreate', 'sandbox')
+ runTest('affinity', 'sandbox')
+ runTest('monitoring', 'sandbox')
+ runTest('self-healing', 'sandbox')
+ runTest('operator-self-healing', 'sandbox')
+ runTest('clone-cluster', 'sandbox')
+ runTest('tls-check', 'sandbox')
+ runTest('users', 'sandbox')
+ runTest('ns-mode', 'sandbox')
+ ShutdownCluster('sandbox')
+ }
+ }
+ stage('E2E demand-backup') {
+ steps {
+ CreateCluster('demand-backup')
+ runTest('demand-backup', 'demand-backup')
+ ShutdownCluster('demand-backup')
+ }
+ }
+ stage('E2E scheduled-backup') {
+ steps {
+ CreateCluster('scheduled-backup')
+ runTest('scheduled-backup', 'scheduled-backup')
+ ShutdownCluster('scheduled-backup')
+ }
+ }
+ stage('E2E Upgrade') {
+ steps {
+ CreateCluster('upgrade')
+ runTest('upgrade', 'upgrade')
+ runTest('smart-update', 'upgrade')
+ ShutdownCluster('upgrade')
+ }
+ }
+ stage('E2E Version-service') {
+ steps {
+
CreateCluster('version-service') + runTest('version-service', 'version-service') + ShutdownCluster('version-service') + } + } + } + } + } + post { + always { + setTestsresults() + + makeReport() + sh """ + echo "${TestsReport}" > TestsReport.xml + """ + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml' + + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + sh ''' + export CLUSTER_NAME=$(echo jkns-ver-pgo-${PG_VERSION}-$(git -C source rev-parse --short HEAD) | tr '[:upper:]' '[:lower:]') + source $HOME/google-cloud-sdk/path.bash.inc + gcloud auth activate-service-account alpha-svc-acct@"${GCP_PROJECT}".iam.gserviceaccount.com --key-file=$CLIENT_SECRET_FILE + gcloud config set project $GCP_PROJECT + gcloud container clusters list --format='csv[no-heading](name)' --filter $CLUSTER_NAME | xargs gcloud container clusters delete --zone ${GKERegion} --quiet || true + ''' + } + sh ''' + sudo docker rmi -f \$(sudo docker images -q) || true + sudo rm -rf ./* + sudo rm -rf $HOME/google-cloud-sdk + ''' + deleteDir() + } + } +}