add docs, cleanup
anveshreddy18 committed May 29, 2024
1 parent 3f37a55 commit 8d1c313
Showing 9 changed files with 143 additions and 59 deletions.
12 changes: 7 additions & 5 deletions .ci/integration_test
@@ -125,8 +125,11 @@ function create_etcd_data_directory() {
mkdir -p ${ETCD_DATA_DIR}
}

function get_aws_existing_region() {
function get_aws_existing_credentials() {
export REGION=`cat ${HOME}/.aws/config | grep -e "^.*region.*$" | sed "s/^.*region[ ]*=[ ]*//"`
export AWS_DEFAULT_REGION=${REGION}
export AWS_ACCESS_KEY_ID=`cat ${HOME}/.aws/credentials | grep -e "^.*aws_access_key_id.*$" | sed "s/^.*aws_access_key_id[ ]*=[ ]*//"`
export AWS_SECRET_ACCESS_KEY=`cat ${HOME}/.aws/credentials | grep -e "^.*aws_secret_access_key.*$" | sed "s/^.*aws_secret_access_key[ ]*=[ ]*//"`
}

#############################
@@ -174,7 +177,6 @@ function create_aws_secret() {

function delete_aws_secret() {
rm -rf ${HOME}/.aws
unset AWS_APPLICATION_CREDENTIALS_JSON AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_DEFAULT_REGION
}

function create_s3_bucket() {
@@ -196,7 +198,7 @@ function setup-aws-infrastructure() {
if [[ "${USE_EXISTING_AWS_SECRET}" == "1" ]]; then
create_aws_secret
else
get_aws_existing_region
get_aws_existing_credentials
fi
create_s3_bucket
echo "AWS infrastructure setup completed."
@@ -291,10 +293,10 @@ function run_test_on_cluster() {
setup-aws-infrastructure
fi

export ETCD_VERSION=${ETCD_VERSION:-"v3.4.13-bootstrap-1"}
export ETCD_VERSION=${ETCD_VERSION:-"v0.1.1"}
echo "Etcd version: ${ETCD_VERSION}"

export ETCDBR_VERSION=${ETCDBR_VERSION:-${ETCDBR_VER:-"v0.24.7"}}
export ETCDBR_VERSION=${ETCDBR_VERSION:-${ETCDBR_VER:-"v0.28.0"}}
echo "Etcd-backup-restore version: ${ETCDBR_VERSION}"

echo "Starting integration tests on k8s cluster."
4 changes: 2 additions & 2 deletions Makefile
@@ -87,7 +87,7 @@ kind-up: $(KIND)

.PHONY: kind-down
kind-down: $(KIND)
$(KIND) delete cluster --name etcdbr-e2e
kind delete cluster --name etcdbr-e2e

.PHONY: deploy-localstack
deploy-localstack: $(KUBECTL)
@@ -103,7 +103,7 @@ deploy-azurite: $(KUBECTL)

.PHONY: ci-e2e-kind
ci-e2e-kind:
./hack/ci-e2e-kind.sh
./hack/ci-e2e-kind.sh $(PROVIDERS)

.PHONY: pr-test-e2e
pr-test-e2e:
1 change: 0 additions & 1 deletion chart/etcd-backup-restore/templates/etcd-peer-service.yaml
@@ -22,4 +22,3 @@ spec:
protocol: TCP
port: {{ .Values.servicePorts.server }}
targetPort: {{ .Values.servicePorts.server }}

4 changes: 2 additions & 2 deletions chart/etcd-backup-restore/values.yaml
@@ -2,11 +2,11 @@ images:
# etcd image to use
etcd:
repository: europe-docker.pkg.dev/gardener-project/releases/gardener/etcd-wrapper
tag: v0.1.1 #v3.4.13-bootstrap-1
tag: v0.1.1
pullPolicy: IfNotPresent
# etcd-backup-restore image to use
etcdBackupRestore:
repository: anveshreddy18/etcd-backup-restore #europe-docker.pkg.dev/gardener-project/snapshots/gardener/etcdbrctl
repository: europe-docker.pkg.dev/gardener-project/public/gardener/etcdbrctl
tag: v0.28.0
pullPolicy: IfNotPresent

4 changes: 2 additions & 2 deletions docs/development/testing_and_dependencies.md
@@ -42,8 +42,8 @@ make integration-test-cluster
:warning: Prerequisite for this command is to set the following environment variables:

- INTEGRATION_TEST_KUBECONFIG: kubeconfig to the cluster on which you wish to run the test
- ETCD_VERSION: optional, defaults to `v3.4.13-bootstrap-1`
- ETCDBR_VERSION: optional, defaults to `v0.12.1`
- ETCD_VERSION: optional, defaults to etcd-wrapper `v0.1.1`
- ETCDBR_VERSION: optional, defaults to `v0.28.0`
- ACCESS_KEY_ID: S3 credentials
- SECRET_ACCESS_KEY: S3 credentials
- REGION: S3 credentials
67 changes: 67 additions & 0 deletions docs/development/tests.md
@@ -18,10 +18,77 @@ Integration tests include the basic working of:
- **data validation**: corrupted etcd data should be marked for deletion and restoration should be triggered
- **restoration**: etcd data should be restored correctly from latest set of snapshots (full + deltas)

The integration tests can be run in multiple ways:

**Note**: The tests expect the AWS credentials to be present in the `$HOME/.aws` directory. Make sure to provide the correct credentials before running the tests.
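
A minimal sketch of the expected layout, matching what the CI script reads from `$HOME/.aws` (all values below are placeholders):

```sh
# Placeholder credentials; replace them with real values before running the tests.
mkdir -p "${HOME}/.aws"
cat > "${HOME}/.aws/config" <<EOF
[default]
region = eu-west-1
EOF
cat > "${HOME}/.aws/credentials" <<EOF
[default]
aws_access_key_id = <access-key-id>
aws_secret_access_key = <secret-access-key>
EOF
```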

- **Process**: The tests can be run locally with both `etcd` and `etcdbr` running as processes. To execute the tests, run the following command:

```sh
make integration-test
```

- **Cluster**: The tests can be run on a Kubernetes cluster. The tests create a provider-specific namespace on the cluster and deploy the [etcd-backup-restore helm chart](../../chart/etcd-backup-restore), which in turn deploys the required secrets, configmap, services, and finally the statefulset whose pod runs etcd with backup-restore as a sidecar. To execute the tests, run the following command:

```sh
make integration-test-cluster
```
**Note**: The prerequisite for this command is to set the following environment variables (an example follows the list):
1) INTEGRATION_TEST_KUBECONFIG: kubeconfig to the cluster on which you wish to run the test
2) ETCD_VERSION: optional, defaults to `v0.1.1`
3) ETCDBR_VERSION: optional, defaults to `v0.28.0`
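
For example (the kubeconfig path below is illustrative):

```sh
export INTEGRATION_TEST_KUBECONFIG="${HOME}/.kube/test-cluster-kubeconfig.yaml"
export ETCD_VERSION="v0.1.1"      # optional, this is the default
export ETCDBR_VERSION="v0.28.0"   # optional, this is the default
make integration-test-cluster
```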

### Unit tests

Each package within this repo contains its own set of unit tests to test the functionality of the methods contained within the packages.
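
For example, the tests of a single package can be run with standard Go tooling (the package path below is only illustrative):

```sh
# Run the unit tests of one package with verbose output.
go test -v ./pkg/snapstore/...
```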

### Performance regression tests

These tests help check any regression in performance in terms of memory consumption and CPU utilization.

### End-to-end tests

The e2e tests for etcd-backup-restore are the integrationcluster tests in the `test/e2e/integrationcluster` package. These tests are run on a Kubernetes cluster and exercise the full functionality of etcd-backup-restore. The tests create a provider-specific namespace on the cluster and deploy the [etcd-backup-restore helm chart](../../chart/etcd-backup-restore), which in turn deploys the required secrets, configmap, services, and finally the statefulset whose pod runs etcd with backup-restore as a sidecar.

These tests are set up to run against both emulators and real cloud providers. The emulators can be used for local development and testing, as well as in the prow job that tests code changes when a PR is raised. The real cloud providers can be used for testing in a real cloud environment, to ensure that the changes work as expected there.

Currently the tests are run on the following cloud providers:
- AWS
- GCP
- Azure

To run the e2e tests with the emulators, run the following command:

```sh
make ci-e2e-kind PROVIDERS="{providers}"
```

By default, when no provider is specified, the tests run against the AWS emulator (Localstack) as the storage provider. Providers can be specified as comma-separated, lowercase values of the cloud providers listed above. For example, to run the tests against AWS and GCP, run the following command:

```sh
make ci-e2e-kind PROVIDERS="aws,gcp"
```


To run the tests with real cloud providers, a few changes need to be made to the `hack/ci-e2e-kind.sh` script: the script needs to be updated with the correct credentials for the cloud providers, and the emulator-specific variables need to be removed.

#### AWS

For AWS, first get the AWS credentials and update the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` variables of the `hack/ci-e2e-kind.sh` script with the correct values, and remove the Localstack-specific variables `AWS_ENDPOINT_URL_S3` and `LOCALSTACK_HOST` from the make command of the script. The creation of the `/tmp/aws.json` file should also be updated with the correct values; after removing the `endpoint` and `s3ForcePathStyle` fields, it should look like the snippet below:

```sh
export AWS_APPLICATION_CREDENTIALS_JSON="/tmp/aws.json"
echo "{ \"accessKeyID\": \"${AWS_ACCESS_KEY_ID}\", \"secretAccessKey\": \"${AWS_SECRET_ACCESS_KEY}\", \"region\": \"${AWS_DEFAULT_REGION}\" }" > "${AWS_APPLICATION_CREDENTIALS_JSON}"
```

With these changes made, the tests can be run in the same way as with the emulators.

#### GCP

For GCP, first get the GCP service account credentials JSON file and set `GOOGLE_APPLICATION_CREDENTIALS` to the path of this service account file. Also update the `GCP_PROJECT_ID` variable and set it to your project ID. The environment variables required for the fakegcs provider must also be removed from the make command of the `hack/ci-e2e-kind.sh` file; for that, remove the variables `GOOGLE_EMULATOR_ENABLED`, `GOOGLE_EMULATOR_HOST` and `GOOGLE_STORAGE_API_ENDPOINT`, and then run the tests in the same way as with the emulators.
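
A minimal sketch of the corresponding changes in `hack/ci-e2e-kind.sh` (the path and project ID below are placeholders):

```sh
# Use a real GCS bucket: point to your service account and project, and drop
# GOOGLE_EMULATOR_ENABLED, GOOGLE_EMULATOR_HOST and GOOGLE_STORAGE_API_ENDPOINT
# from the make invocation.
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
export GCP_PROJECT_ID="my-gcp-project"
```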

#### Azure

For Azure, first get the Azure credentials and update the `STORAGE_ACCOUNT` and `STORAGE_KEY` environment variables in the `hack/ci-e2e-kind.sh` script with the correct values. Also, remove the Azurite-specific variables `AZURE_STORAGE_API_ENDPOINT`, `AZURE_EMULATOR_ENABLED`, `AZURITE_HOST` and `AZURE_STORAGE_CONNECTION_STRING` from the make command of the script. With these changes made, the tests can be run in the same way as with the emulators.
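
A minimal sketch of the corresponding changes in `hack/ci-e2e-kind.sh` (the account name and key below are placeholders):

```sh
# Use a real Azure storage account, and drop AZURE_STORAGE_API_ENDPOINT,
# AZURE_EMULATOR_ENABLED, AZURITE_HOST and AZURE_STORAGE_CONNECTION_STRING
# from the make invocation.
STORAGE_ACCOUNT="<storage-account-name>"
STORAGE_KEY="<storage-account-key>"
```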

The e2e tests can also be run on a real cluster by setting the `KUBECONFIG` environment variable to the path of the kubeconfig file of the cluster. The tests can be run in the same way as with the emulators.
42 changes: 26 additions & 16 deletions hack/ci-e2e-kind.sh
@@ -14,10 +14,27 @@ trap "
" EXIT

kubectl wait --for=condition=ready node --all
export ETCD_VERSION="v0.1.1" #v3.4.13-bootstrap-1
export ETCDBR_VERSION="v3.6" #v0.29.0-dev (for anveshreddy18 dockerhub)
export ETCD_VERSION="v0.1.1"
export ETCDBR_VERSION="v0.29.0-dev"

# AWS S3 #
LOCALSTACK_HOST="localstack.default:4566"
AWS_ENDPOINT_URL_S3="http://localhost:4566"
AWS_ACCESS_KEY_ID="ACCESSKEYAWSUSER"
AWS_SECRET_ACCESS_KEY="sEcreTKey"
AWS_DEFAULT_REGION=us-east-2

export AWS_APPLICATION_CREDENTIALS_JSON="/tmp/aws.json"
echo "{ \"accessKeyID\": \"${AWS_ACCESS_KEY_ID}\", \"secretAccessKey\": \"${AWS_SECRET_ACCESS_KEY}\", \"region\": \"${AWS_DEFAULT_REGION}\", \"endpoint\": \"${AWS_ENDPOINT_URL_S3}\" , \"s3ForcePathStyle\": true }" > "${AWS_APPLICATION_CREDENTIALS_JSON}"

# Google Cloud Storage #
GOOGLE_EMULATOR_HOST="fake-gcs.default:8000"
GOOGLE_STORAGE_API_ENDPOINT="http://localhost:8000/storage/v1/"

export GOOGLE_APPLICATION_CREDENTIALS="path/to/service-account.json"
export GCP_PROJECT_ID="your-project-id"

# Azure Blob Storage #
STORAGE_ACCOUNT="devstoreaccount1"
STORAGE_KEY="Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
AZURE_STORAGE_API_ENDPOINT="http://localhost:10000"
@@ -29,24 +46,17 @@ mkdir -p "${AZURE_APPLICATION_CREDENTIALS}"
echo -n "${STORAGE_ACCOUNT}" > "${AZURE_APPLICATION_CREDENTIALS}/storageAccount"
echo -n "${STORAGE_KEY}" > "${AZURE_APPLICATION_CREDENTIALS}/storageKey"

export AWS_APPLICATION_CREDENTIALS_JSON="/tmp/aws.json"
echo "{ \"accessKeyID\": \"ACCESSKEYAWSUSER\", \"secretAccessKey\": \"sEcreTKey\", \"region\": \"us-east-2\", \"endpoint\": \"http://127.0.0.1:4566\", \"s3ForcePathStyle\": true }" >/tmp/aws.json

# GOOGLE_APPLICATION_CREDENTIALS="/Users/i586337/Downloads/svc_acc.json" \
# GCP_PROJECT_ID="sap-se-gcp-k8s-dev-team" \


: ${TEST_PROVIDERS:="aws"}
TEST_PROVIDERS=${1:-$TEST_PROVIDERS}

make LOCALSTACK_HOST="localstack.default:4566" \
AWS_ENDPOINT_URL_S3="http://localhost:4566" \
AWS_ACCESS_KEY_ID="ACCESSKEYAWSUSER" \
AWS_SECRET_ACCESS_KEY="sEcreTKey" \
AWS_DEFAULT_REGION=us-east-2 \
make LOCALSTACK_HOST=${LOCALSTACK_HOST} \
AWS_ENDPOINT_URL_S3=${AWS_ENDPOINT_URL_S3} \
AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
GOOGLE_EMULATOR_ENABLED="true" \
GOOGLE_EMULATOR_HOST="fake-gcs.default:8000" \
GOOGLE_STORAGE_API_ENDPOINT="http://localhost:8000/storage/v1/" \
GOOGLE_EMULATOR_HOST=${GOOGLE_EMULATOR_HOST} \
GOOGLE_STORAGE_API_ENDPOINT=${GOOGLE_STORAGE_API_ENDPOINT} \
STORAGE_ACCOUNT=${STORAGE_ACCOUNT} \
STORAGE_KEY=${STORAGE_KEY} \
AZURE_STORAGE_API_ENDPOINT=${AZURE_STORAGE_API_ENDPOINT} \
63 changes: 34 additions & 29 deletions hack/e2e-test/run-e2e-test.sh
@@ -53,7 +53,7 @@ function cleanup_aws_infrastructure() {
}

function cleanup_gcs_infrastructure() {
if [[ -n ${GOOGLE_APPLICATION_CREDENTIALS:-""} ]]; then
if [[ -z ${GOOGLE_EMULATOR_ENABLED:-""} ]]; then
result=$(gsutil list gs://${TEST_ID} 2>&1 || true)
if [[ $result == *"404"* ]]; then
echo "GCS bucket is already deleted."
@@ -144,14 +144,17 @@ Please make sure the following environment variables are set:
AWS_ACCESS_KEY_ID Key ID of the user.
AWS_SECRET_ACCESS_KEY Access key of the user.
AWS_DEFAULT_REGION Region in which the test bucket is created.
LOCALSTACK_HOST Host of the localstack service. ( optional: required for testing with localstack)
AWS_ENDPOINT_URL_S3 URL of the S3 endpoint. ( optional: required for testing with localstack)
LOCALSTACK_HOST Host of the localstack service. ( required only for testing with localstack)
AWS_ENDPOINT_URL_S3 URL of the S3 endpoint. ( required only for testing with localstack)
EOM
exit 0
}

function setup_aws_e2e() {
( [[ -z ${AWS_ACCESS_KEY_ID:-""} ]] || [[ -z ${AWS_SECRET_ACCESS_KEY:=""} ]] || [[ -z ${AWS_DEFAULT_REGION:=""} ]] ) && usage_aws
if [[ -z ${AWS_ACCESS_KEY_ID:-""} ]] || [[ -z ${AWS_SECRET_ACCESS_KEY:=""} ]] || [[ -z ${AWS_DEFAULT_REGION:=""} ]]; then
usage_aws
exit 1
fi
if [[ -n ${LOCALSTACK_HOST:-""} ]]; then
make deploy-localstack
else
@@ -170,7 +173,7 @@ function setup_gcscli() {
function setup_gcs_infrastructure() {
echo "Setting up GCS infrastructure..."
echo "Creating test bucket..."
if [[ -n ${GOOGLE_APPLICATION_CREDENTIALS:-""} ]]; then
if [[ -z ${GOOGLE_EMULATOR_ENABLED:-""} ]]; then
gsutil mb "gs://${TEST_ID}"
else
gsutil -o "Credentials:gs_json_host=127.0.0.1" -o "Credentials:gs_json_port=4443" -o "Boto:https_validate_certificates=False" mb "gs://${TEST_ID}"
@@ -185,11 +188,11 @@ Usage:
run-e2e-test.sh gcs
Please make sure the following environment variables are set:
GOOGLE_APPLICATION_CREDENTIALS Path to the service account key file. ( for real infra )
GCP_PROJECT_ID Project ID of the GCP project. ( for real infra )
GOOGLE_EMULATOR_HOST Host of the fake GCS server. ( for fakegcs )
GOOGLE_EMULATOR_ENABLED Set to "true" to Enable the fake GCS server for testing. ( for fakegcs )
GOOGLE_STORAGE_API_ENDPOINT URL of the GCS storage endpoint ( for fakegcs )
GOOGLE_APPLICATION_CREDENTIALS Path to the service account key file.
GCP_PROJECT_ID Project ID of the GCP project.
GOOGLE_EMULATOR_HOST Host of the fake GCS server. ( required only for fakegcs )
GOOGLE_EMULATOR_ENABLED Set to "true" to Enable the fake GCS server for testing. ( required only for fakegcs )
GOOGLE_STORAGE_API_ENDPOINT URL of the GCS storage endpoint ( required only for fakegcs )
EOM
exit 0
}
@@ -205,11 +208,17 @@ function authorize_gcloud() {
}

function setup_gcs_e2e() {
if [[ -n ${GOOGLE_APPLICATION_CREDENTIALS:-""} ]]; then
( [[ -z ${GCP_PROJECT_ID:-""} ]] ) && usage_gcs
if [[ -z ${GOOGLE_EMULATOR_ENABLED:-""} ]]; then
if [[ -z ${GCP_PROJECT_ID:-""} ]] || [[ -z ${GOOGLE_APPLICATION_CREDENTIALS} ]]; then
usage_gcs
exit 1
fi
authorize_gcloud
else
( [[ -z ${GOOGLE_EMULATOR_ENABLED:-""} ]] || [[ -z ${GOOGLE_EMULATOR_HOST:-""} ]] || [[ -z ${GOOGLE_STORAGE_API_ENDPOINT:-""} ]] ) && usage_gcs
if [[ -z ${GOOGLE_EMULATOR_HOST:-""} ]] || [[ -z ${GOOGLE_STORAGE_API_ENDPOINT:-""} ]]; then
usage_gcs
exit 1
fi
echo "GOOGLE_APPLICATION_CREDENTIALS is not set. Using fake GCS server for testing."
make deploy-fakegcs
fi
@@ -218,11 +227,6 @@ function setup_gcs_e2e() {
}

function setup_azure_infrastructure() {
export AZURE_APPLICATION_CREDENTIALS="/tmp/azuriteCredentials"
mkdir -p "${AZURE_APPLICATION_CREDENTIALS}"
echo -n "${STORAGE_ACCOUNT}" > "${AZURE_APPLICATION_CREDENTIALS}/storageAccount"
echo -n "${STORAGE_KEY}" > "${AZURE_APPLICATION_CREDENTIALS}/storageKey"

echo "Setting up Azure infrastructure..."
echo "Creating test bucket..."
if [[ -n ${AZURE_EMULATOR_ENABLED:-""} ]]; then
@@ -254,20 +258,26 @@ Please make sure the following environment variables are set:
STORAGE_ACCOUNT Name of the storage account.
STORAGE_KEY Key of the storage account.
AZURE_STORAGE_API_ENDPOINT URL of the Azure storage endpoint. ( optional: required for testing with Azurite)
AZURE_EMULATOR_ENABLED Set to "true" to Enable the Azure emulator for testing. ( optional: required for testing with Azurite)
AZURITE_HOST Host of the Azurite service. ( optional: required for testing with Azurite)
AZURE_STORAGE_CONNECTION_STRING Connection string for the Azure storage account. ( optional: required for testing with Azurite)
AZURE_STORAGE_API_ENDPOINT URL of the Azure storage endpoint. ( required only for testing with Azurite)
AZURE_EMULATOR_ENABLED Set to "true" to Enable the Azure emulator for testing. ( required only for testing with Azurite)
AZURITE_HOST Host of the Azurite service. ( required only for testing with Azurite)
AZURE_STORAGE_CONNECTION_STRING Connection string for the Azure storage account. ( required only for testing with Azurite)
EOM
exit 0
}

function setup_azure_e2e() {
if [[ -n ${AZURE_EMULATOR_ENABLED:-""} ]]; then
( [[ -z ${STORAGE_ACCOUNT:-""} ]] || [[ -z ${STORAGE_KEY:-""} ]] || [[ -z ${AZURE_STORAGE_API_ENDPOINT:-""} ]] || [[ -z ${AZURITE_HOST:-""} ]] ) && usage_azure
if [[ -z ${STORAGE_ACCOUNT:-""} ]] || [[ -z ${STORAGE_KEY:-""} ]] || [[ -z ${AZURE_STORAGE_API_ENDPOINT:-""} ]] || [[ -z ${AZURITE_HOST:-""} ]] || [[ -z ${AZURE_STORAGE_CONNECTION_STRING:-""} ]]; then
usage_azure
exit 1
fi
make deploy-azurite
else
( [[ -z ${STORAGE_ACCOUNT:-""} ]] || [[ -z ${STORAGE_KEY:-""} ]] ) && usage_azure
if [[ -z ${STORAGE_ACCOUNT:-""} ]] || [[ -z ${STORAGE_KEY:-""} ]]; then
usage_azure
exit 1
fi
echo "AZURE_EMULATOR_ENABLED is not set. Using Azure services for testing."
fi
setup_azcli
@@ -278,12 +288,7 @@ run_cluster_tests() {
if ! [ -x "$(command -v ginkgo)" ]; then
setup_ginkgo
fi

get_test_id
export ETCD_VERSION=${ETCD_VERSION:-"v0.1.1"}
echo "Etcd version: ${ETCD_VERSION}"
export ETCDBR_VERSION=${ETCDBR_VERSION:-${ETCDBR_VER:-"v0.28.0"}}
echo "Etcd-backup-restore version: ${ETCDBR_VERSION}"

# Setup the infrastructure for the providers in parallel to reduce the setup time.
for p in ${1//,/ }; do
5 changes: 3 additions & 2 deletions test/e2e/integrationcluster/integrationcluster_suite_test.go
@@ -104,8 +104,9 @@ var _ = BeforeSuite(func() {
"tag": etcdVersion,
},
"etcdBackupRestore": map[string]interface{}{
"tag": etcdbrVersion,
// "pullPolicy": "Never",
"repository": "europe-docker.pkg.dev/gardener-project/snapshots/gardener/etcdbrctl",
"tag": etcdbrVersion,
"pullPolicy": "Never",
},
},
"backup": map[string]interface{}{
