From b66fbe90f9a8c76cbd98ffc0ae2adcc74b02c5c1 Mon Sep 17 00:00:00 2001
From: ajanikow <12255597+ajanikow@users.noreply.github.com>
Date: Sat, 8 May 2021 09:24:42 +0000
Subject: [PATCH] Remove tests from Operator repository

---
 Makefile | 186 +--
 tests/acceptance/.gitignore | 3 -
 tests/acceptance/activefailover.template.yaml | 10 -
 tests/acceptance/activefailover.yaml | 7 -
 .../cluster-local-storage.template.yaml | 14 -
 .../acceptance/cluster-scale-1.template.yaml | 12 -
 .../acceptance/cluster-scale-2.template.yaml | 14 -
 .../acceptance/cluster-scale-3.template.yaml | 14 -
 .../acceptance/cluster-scale-4.template.yaml | 14 -
 tests/acceptance/cluster-sync.template.yaml | 15 -
 tests/acceptance/cluster-sync.yaml | 9 -
 tests/acceptance/cluster-sync2.template.yaml | 14 -
 tests/acceptance/cluster.template.yaml | 10 -
 tests/acceptance/cluster.yaml | 7 -
 tests/acceptance/cluster12-replication.yaml | 13 -
 tests/acceptance/generate.sh | 48 -
 tests/acceptance/local-storage.template.yaml | 9 -
 tests/acceptance/local-storage.yaml | 9 -
 tests/acceptance/semiautomation/README.md | 18 -
 .../semiautomation/activefailover.yaml | 13 -
 .../semiautomation/cluster-local-storage.yaml | 17 -
 .../semiautomation/cluster-sync.yaml | 18 -
 .../semiautomation/cluster-sync1.yaml | 19 -
 .../semiautomation/cluster-sync2.yaml | 17 -
 tests/acceptance/semiautomation/cluster.yaml | 13 -
 tests/acceptance/semiautomation/helper.fish | 136 --
 .../semiautomation/local-storage.yaml | 9 -
 .../semiautomation/replication.yaml | 13 -
 tests/acceptance/semiautomation/single.yaml | 13 -
 tests/acceptance/semiautomation/test1a.fish | 38 -
 tests/acceptance/semiautomation/test1b.fish | 41 -
 tests/acceptance/semiautomation/test1c.fish | 40 -
 tests/acceptance/semiautomation/test1d.fish | 44 -
 tests/acceptance/semiautomation/test2a.fish | 56 -
 tests/acceptance/semiautomation/test2b.fish | 77 -
 tests/acceptance/semiautomation/test3a.fish | 38 -
 tests/acceptance/semiautomation/test3b.fish | 41 -
 tests/acceptance/semiautomation/test3c.fish | 40 -
 tests/acceptance/semiautomation/test3d.fish | 77 -
 tests/acceptance/semiautomation/test4a.fish | 52 -
 tests/acceptance/semiautomation/test4b.fish | 52 -
 tests/acceptance/semiautomation/test5a.fish | 39 -
 tests/acceptance/semiautomation/test5b.fish | 41 -
 tests/acceptance/semiautomation/test5c.fish | 40 -
 tests/acceptance/semiautomation/test6a.fish | 38 -
 tests/acceptance/semiautomation/test6b.fish | 40 -
 tests/acceptance/semiautomation/test6c.fish | 40 -
 tests/acceptance/semiautomation/test6d.fish | 38 -
 tests/acceptance/semiautomation/test6e.fish | 40 -
 tests/acceptance/semiautomation/test6f.fish | 40 -
 tests/acceptance/semiautomation/test6g.fish | 52 -
 tests/acceptance/semiautomation/test7a.fish | 78 -
 tests/acceptance/single.template.yaml | 10 -
 tests/acceptance/single.yaml | 7 -
 tests/annotations_test.go | 342 -----
 tests/auth_test.go | 319 -----
 tests/backup_test.go | 1252 -----------------
 tests/change_args_test.go | 194 ---
 tests/cursor_test.go | 376 -----
 tests/db_util.go | 64 -
 tests/deployments_test.go | 186 ---
 tests/duration/Dockerfile | 5 -
 tests/duration/README.md | 32 -
 tests/duration/main.go | 132 --
 tests/duration/simple/error.go | 31 -
 tests/duration/simple/simple.go | 679 ---------
 tests/duration/simple/simple_collection.go | 94 --
 tests/duration/simple/simple_create.go | 50 -
 tests/duration/simple/simple_import.go | 79 --
 tests/duration/simple/simple_query.go | 66 -
 tests/duration/simple/simple_query_update.go | 115 --
 tests/duration/simple/simple_read.go | 88 --
 tests/duration/simple/simple_rebalance.go | 40 -
 tests/duration/simple/simple_remove.go | 71 -
 tests/duration/simple/simple_replace.go | 92 --
 tests/duration/simple/simple_update.go | 87 --
 tests/duration/test/shuffle.go | 43 -
 tests/duration/test/test.go | 66 -
 tests/duration/test_listener.go | 88 --
 tests/duration/test_loop.go | 113 --
 tests/environments_test.go | 90 --
 tests/immutable_test.go | 137 --
 tests/load_balancer_source_ranges_test.go | 158 ---
 tests/load_balancer_test.go | 226 ---
 tests/member_resilience_test.go | 333 -----
 tests/metrics_test.go | 155 --
 tests/operator_upgrade_test.go | 190 ---
 tests/pc_test.go | 127 --
 tests/pdb_test.go | 120 --
 tests/persistent_volumes_test.go | 488 -------
 tests/predicates.go | 72 -
 tests/prepull_image_util.go | 101 --
 tests/resilience_test.go | 435 ------
 tests/resources_test.go | 122 --
 tests/rocksdb_encryption_test.go | 94 --
 tests/scale_test.go | 283 ----
 tests/secret_hashes_test.go | 144 --
 tests/service_account_test.go | 292 ----
 tests/sidecar_test.go | 427 ------
 tests/simple_test.go | 201 ---
 tests/sync/Dockerfile | 5 -
 tests/sync/main.go | 427 ------
 tests/sync_test.go | 255 ----
 tests/test_util.go | 939 -------------
 tests/timeout.go | 41 -
 tests/upgrade_test.go | 280 ----
 tests/version_test.go | 66 -
 107 files changed, 1 insertion(+), 12704 deletions(-)

 delete mode 100644 tests/acceptance/.gitignore
 delete mode 100644 tests/acceptance/activefailover.template.yaml
 delete mode 100644 tests/acceptance/activefailover.yaml
 delete mode 100644 tests/acceptance/cluster-local-storage.template.yaml
 delete mode 100644 tests/acceptance/cluster-scale-1.template.yaml
 delete mode 100644 tests/acceptance/cluster-scale-2.template.yaml
 delete mode 100644 tests/acceptance/cluster-scale-3.template.yaml
 delete mode 100644 tests/acceptance/cluster-scale-4.template.yaml
 delete mode 100644 tests/acceptance/cluster-sync.template.yaml
 delete mode 100644 tests/acceptance/cluster-sync.yaml
 delete mode 100644 tests/acceptance/cluster-sync2.template.yaml
 delete mode 100644 tests/acceptance/cluster.template.yaml
 delete mode 100644 tests/acceptance/cluster.yaml
 delete mode 100644 tests/acceptance/cluster12-replication.yaml
 delete mode 100755 tests/acceptance/generate.sh
 delete mode 100644 tests/acceptance/local-storage.template.yaml
 delete mode 100644 tests/acceptance/local-storage.yaml
 delete mode 100644 tests/acceptance/semiautomation/README.md
 delete mode 100644 tests/acceptance/semiautomation/activefailover.yaml
 delete mode 100644 tests/acceptance/semiautomation/cluster-local-storage.yaml
 delete mode 100644 tests/acceptance/semiautomation/cluster-sync.yaml
 delete mode 100644 tests/acceptance/semiautomation/cluster-sync1.yaml
 delete mode 100644 tests/acceptance/semiautomation/cluster-sync2.yaml
 delete mode 100644 tests/acceptance/semiautomation/cluster.yaml
 delete mode 100644 tests/acceptance/semiautomation/helper.fish
 delete mode 100644 tests/acceptance/semiautomation/local-storage.yaml
 delete mode 100644 tests/acceptance/semiautomation/replication.yaml
 delete mode 100644 tests/acceptance/semiautomation/single.yaml
 delete mode 100755 tests/acceptance/semiautomation/test1a.fish
 delete mode 100755 tests/acceptance/semiautomation/test1b.fish
 delete mode 100755 tests/acceptance/semiautomation/test1c.fish
 delete mode 100755 tests/acceptance/semiautomation/test1d.fish
 delete mode 100755 tests/acceptance/semiautomation/test2a.fish
 delete mode 100755 tests/acceptance/semiautomation/test2b.fish
 delete mode 100755 tests/acceptance/semiautomation/test3a.fish
 delete mode 100755 tests/acceptance/semiautomation/test3b.fish
 delete mode 100755 tests/acceptance/semiautomation/test3c.fish
 delete mode 100755 tests/acceptance/semiautomation/test3d.fish
 delete mode 100755 tests/acceptance/semiautomation/test4a.fish
 delete mode 100755 tests/acceptance/semiautomation/test4b.fish
 delete mode 100755 tests/acceptance/semiautomation/test5a.fish
 delete mode 100755 tests/acceptance/semiautomation/test5b.fish
 delete mode 100755 tests/acceptance/semiautomation/test5c.fish
 delete mode 100755 tests/acceptance/semiautomation/test6a.fish
 delete mode 100755 tests/acceptance/semiautomation/test6b.fish
 delete mode 100755 tests/acceptance/semiautomation/test6c.fish
 delete mode 100755 tests/acceptance/semiautomation/test6d.fish
 delete mode 100755 tests/acceptance/semiautomation/test6e.fish
 delete mode 100755 tests/acceptance/semiautomation/test6f.fish
 delete mode 100755 tests/acceptance/semiautomation/test6g.fish
 delete mode 100755 tests/acceptance/semiautomation/test7a.fish
 delete mode 100644 tests/acceptance/single.template.yaml
 delete mode 100644 tests/acceptance/single.yaml
 delete mode 100644 tests/annotations_test.go
 delete mode 100644 tests/auth_test.go
 delete mode 100644 tests/backup_test.go
 delete mode 100644 tests/change_args_test.go
 delete mode 100644 tests/cursor_test.go
 delete mode 100644 tests/db_util.go
 delete mode 100644 tests/deployments_test.go
 delete mode 100644 tests/duration/Dockerfile
 delete mode 100644 tests/duration/README.md
 delete mode 100644 tests/duration/main.go
 delete mode 100644 tests/duration/simple/error.go
 delete mode 100644 tests/duration/simple/simple.go
 delete mode 100644 tests/duration/simple/simple_collection.go
 delete mode 100644 tests/duration/simple/simple_create.go
 delete mode 100644 tests/duration/simple/simple_import.go
 delete mode 100644 tests/duration/simple/simple_query.go
 delete mode 100644 tests/duration/simple/simple_query_update.go
 delete mode 100644 tests/duration/simple/simple_read.go
 delete mode 100644 tests/duration/simple/simple_rebalance.go
 delete mode 100644 tests/duration/simple/simple_remove.go
 delete mode 100644 tests/duration/simple/simple_replace.go
 delete mode 100644 tests/duration/simple/simple_update.go
 delete mode 100644 tests/duration/test/shuffle.go
 delete mode 100644 tests/duration/test/test.go
 delete mode 100644 tests/duration/test_listener.go
 delete mode 100644 tests/duration/test_loop.go
 delete mode 100644 tests/environments_test.go
 delete mode 100644 tests/immutable_test.go
 delete mode 100644 tests/load_balancer_source_ranges_test.go
 delete mode 100644 tests/load_balancer_test.go
 delete mode 100644 tests/member_resilience_test.go
 delete mode 100644 tests/metrics_test.go
 delete mode 100644 tests/operator_upgrade_test.go
 delete mode 100644 tests/pc_test.go
 delete mode 100644 tests/pdb_test.go
 delete mode 100644 tests/persistent_volumes_test.go
 delete mode 100644 tests/predicates.go
 delete mode 100644 tests/prepull_image_util.go
 delete mode 100644 tests/resilience_test.go
 delete mode 100644 tests/resources_test.go
 delete mode 100644 tests/rocksdb_encryption_test.go
 delete mode 100644 tests/scale_test.go
 delete mode 100644 tests/secret_hashes_test.go
 delete mode 100644 tests/service_account_test.go
 delete mode 100644 tests/sidecar_test.go
 delete mode 100644 tests/simple_test.go
 delete mode 100644 tests/sync/Dockerfile
 delete mode 100644 tests/sync/main.go
 delete mode 100644 tests/sync_test.go
 delete mode 100644 tests/test_util.go
 delete mode 100644 tests/timeout.go
 delete mode 100644 tests/upgrade_test.go
 delete mode 100644 tests/version_test.go

diff --git a/Makefile b/Makefile
index d3925a151..3f339418c 100644
--- a/Makefile
+++ b/Makefile
@@ -31,8 +31,6 @@ PULSAR := $(GOBUILDDIR)/bin/pulsar$(shell go env GOEXE)
 GOASSETSBUILDER := $(GOBUILDDIR)/bin/go-assets-builder$(shell go env GOEXE)
 
 DOCKERFILE := Dockerfile
-DOCKERTESTFILE := Dockerfile.test
-DOCKERDURATIONTESTFILE := tests/duration/Dockerfile
 
 HELM ?= $(shell which helm)
@@ -101,12 +99,6 @@ endif
 ifndef OPERATORUBIIMAGE
 	OPERATORUBIIMAGE := $(DOCKERNAMESPACE)/kube-arangodb$(IMAGESUFFIX)-ubi
 endif
-ifndef TESTIMAGE
-	TESTIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-test$(IMAGESUFFIX)
-endif
-ifndef DURATIONTESTIMAGE
-	DURATIONTESTIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-durationtest$(IMAGESUFFIX)
-endif
 ifndef ENTERPRISEIMAGE
 	ENTERPRISEIMAGE := $(DEFAULTENTERPRISEIMAGE)
 endif
@@ -121,19 +113,9 @@ endif
 BINNAME := $(PROJECT)
 BIN := $(BINDIR)/$(BINNAME)
 
-TESTBINNAME := $(PROJECT)_test
-TESTBIN := $(BINDIR)/$(TESTBINNAME)
-DURATIONTESTBINNAME := $(PROJECT)_duration_test
-DURATIONTESTBIN := $(BINDIR)/$(DURATIONTESTBINNAME)
 RELEASE := $(GOBUILDDIR)/bin/release
 GHRELEASE := $(GOBUILDDIR)/bin/github-release
 
-TESTLENGTHOPTIONS := -test.short
-TESTTIMEOUT := 30m
-ifeq ($(LONG), 1)
-	TESTLENGTHOPTIONS :=
-	TESTTIMEOUT := 300m
-endif
 ifdef VERBOSE
 	TESTVERBOSEOPTIONS := -v
 endif
@@ -143,22 +125,6 @@ SOURCES_QUERY := find $(SRCDIR) -name '*.go' -type f -not -path '$(SRCDIR)/tests
 SOURCES := $(shell $(SOURCES_QUERY))
 DASHBOARDSOURCES := $(shell find $(DASHBOARDDIR)/src -name '*.js' -not -path './test/*') $(DASHBOARDDIR)/package.json
 
-ifndef ARANGOSYNCSRCDIR
-	ARANGOSYNCSRCDIR := $(SCRIPTDIR)/arangosync
-endif
-DOCKERARANGOSYNCCTRLFILE=tests/sync/Dockerfile
-ifndef ARANGOSYNCTESTCTRLIMAGE
-	ARANGOSYNCTESTCTRLIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-sync-test-ctrl$(IMAGESUFFIX)
-endif
-ifndef ARANGOSYNCTESTIMAGE
-	ARANGOSYNCTESTIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-sync-test$(IMAGESUFFIX)
-endif
-ifndef ARANGOSYNCIMAGE
-	ARANGOSYNCIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-sync$(IMAGESUFFIX)
-endif
-ARANGOSYNCTESTCTRLBINNAME := $(PROJECT)_sync_test_ctrl
-ARANGOSYNCTESTCTRLBIN := $(BINDIR)/$(ARANGOSYNCTESTCTRLBINNAME)
-
 .DEFAULT_GOAL := all
 .PHONY: all
 all: check-vars verify-generated build
@@ -268,9 +234,6 @@ dashboard/assets.go: $(DASHBOARDSOURCES) $(DASHBOARDDIR)/Dockerfile.build
 .PHONY: bin
 bin: $(BIN)
 
-.PHONY: test-bin
-test-bin: $(TESTBIN)
-
 $(BIN): $(SOURCES) dashboard/assets.go VERSION
 	@mkdir -p $(BINDIR)
 	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -installsuffix netgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o $(BIN) $(REPOPATH)
@@ -381,78 +344,6 @@ run-unit-tests: $(SOURCES)
 		$(REPOPATH)/pkg/util/validation \
 		$(REPOPATH)/pkg/backup/...
 
-$(TESTBIN): $(GOBUILDDIR) $(SOURCES)
-	@mkdir -p $(BINDIR)
-	CGO_ENABLED=0 go test -c -installsuffix netgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o $(TESTBIN) $(REPOPATH)/tests
-
-
-.PHONY: docker-test
-docker-test: $(TESTBIN)
-	docker build --quiet -f $(DOCKERTESTFILE) -t $(TESTIMAGE) .
-
-.PHONY: run-upgrade-tests
-run-upgrade-tests:
-	TESTOPTIONS="-test.run=TestUpgrade" make run-tests
-
-.PHONY: prepare-run-tests
-prepare-run-tests:
-ifdef PUSHIMAGES
-	docker push $(OPERATORIMAGE)
-endif
-ifneq ($(DEPLOYMENTNAMESPACE), default)
-	$(ROOTDIR)/scripts/kube_delete_namespace.sh $(DEPLOYMENTNAMESPACE)
-	kubectl create namespace $(DEPLOYMENTNAMESPACE)
-endif
-	kubectl apply -f $(MANIFESTPATHCRD)
-	kubectl apply -f $(MANIFESTPATHSTORAGE)
-	kubectl apply -f $(MANIFESTPATHDEPLOYMENT)
-	kubectl apply -f $(MANIFESTPATHDEPLOYMENTREPLICATION)
-	kubectl apply -f $(MANIFESTPATHBACKUP)
-	kubectl apply -f $(MANIFESTPATHTEST)
-	$(ROOTDIR)/scripts/kube_create_storage.sh $(DEPLOYMENTNAMESPACE)
-	$(ROOTDIR)/scripts/kube_create_license_key_secret.sh "$(DEPLOYMENTNAMESPACE)" '$(ENTERPRISELICENSE)'
-	$(ROOTDIR)/scripts/kube_create_backup_remote_secret.sh "$(DEPLOYMENTNAMESPACE)" '$(TEST_REMOTE_SECRET)'
-
-.PHONY: run-tests
-run-tests: docker-test
-ifdef PUSHIMAGES
-	docker push $(OPERATORIMAGE)
-	docker push $(TESTIMAGE)
-endif
-ifneq ($(DEPLOYMENTNAMESPACE), default)
-	$(ROOTDIR)/scripts/kube_delete_namespace.sh $(DEPLOYMENTNAMESPACE)
-	kubectl create namespace $(DEPLOYMENTNAMESPACE)
-endif
-	kubectl apply -f $(MANIFESTPATHCRD)
-	kubectl apply -f $(MANIFESTPATHSTORAGE)
-	kubectl apply -f $(MANIFESTPATHDEPLOYMENT)
-	kubectl apply -f $(MANIFESTPATHDEPLOYMENTREPLICATION)
-	kubectl apply -f $(MANIFESTPATHBACKUP)
-	kubectl apply -f $(MANIFESTPATHTEST)
-	$(ROOTDIR)/scripts/kube_create_storage.sh $(DEPLOYMENTNAMESPACE)
-	$(ROOTDIR)/scripts/kube_create_license_key_secret.sh "$(DEPLOYMENTNAMESPACE)" '$(ENTERPRISELICENSE)'
-	$(ROOTDIR)/scripts/kube_create_backup_remote_secret.sh "$(DEPLOYMENTNAMESPACE)" '$(TEST_REMOTE_SECRET)'
-	$(ROOTDIR)/scripts/kube_run_tests.sh $(DEPLOYMENTNAMESPACE) $(TESTIMAGE) "$(ARANGODIMAGE)" '$(ENTERPRISEIMAGE)' '$(TESTTIMEOUT)' '$(TESTLENGTHOPTIONS)' '$(TESTOPTIONS)' '$(TEST_REMOTE_REPOSITORY)'
-
-$(DURATIONTESTBIN): $(SOURCES)
-	CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o $(DURATIONTESTBINNAME) $(REPOPATH)/tests/duration
-
-
-.PHONY: docker-duration-test
-docker-duration-test: $(DURATIONTESTBIN)
-	docker build --quiet -f $(DOCKERDURATIONTESTFILE) -t $(DURATIONTESTIMAGE) .
-ifdef PUSHIMAGES
-	docker push $(DURATIONTESTIMAGE)
-endif
-
-.PHONY: cleanup-tests
-cleanup-tests:
-	kubectl delete ArangoDeployment -n $(DEPLOYMENTNAMESPACE) --all
-	sleep 10
-ifneq ($(DEPLOYMENTNAMESPACE), default)
-	$(ROOTDIR)/scripts/kube_delete_namespace.sh $(DEPLOYMENTNAMESPACE)
-endif
-
 # Release building
 
 .PHONY: patch-readme
@@ -470,16 +361,6 @@ patch-release: patch-readme patch-examples
 patch-chart:
 	$(ROOTDIR)/scripts/patch_chart.sh "$(VERSION_MAJOR_MINOR_PATCH)" "$(OPERATORIMAGE)"
 
-.PHONY: changelog
-changelog:
-	docker run --rm \
-		-e CHANGELOG_GITHUB_TOKEN=$(shell cat ~/.arangodb/github-token) \
-		-v "$(ROOTDIR)":/usr/local/src/your-app \
-		ferrarimarco/github-changelog-generator:1.14.3 \
-		--user arangodb \
-		--project kube-arangodb \
-		--no-author
-
 .PHONY: docker-push
 docker-push: docker
 ifneq ($(DOCKERNAMESPACE), arangodb)
@@ -519,71 +400,6 @@ release-minor: $(RELEASE)
 release-major: $(RELEASE)
 	GOPATH=$(GOBUILDDIR) $(RELEASE) -type=major
 
-## Kubernetes utilities
-
-.PHONY: minikube-start
-minikube-start:
-	minikube start --cpus=4 --memory=6144
-
-.PHONY: delete-operator
-delete-operator:
-	kubectl delete -f $(MANIFESTPATHTEST) --ignore-not-found
-	kubectl delete -f $(MANIFESTPATHDEPLOYMENT) --ignore-not-found
-	kubectl delete -f $(MANIFESTPATHDEPLOYMENTREPLICATION) --ignore-not-found
-	kubectl delete -f $(MANIFESTPATHBACKUP) --ignore-not-found
-	kubectl delete -f $(MANIFESTPATHSTORAGE) --ignore-not-found
-	kubectl delete -f $(MANIFESTPATHCRD) --ignore-not-found
-
-.PHONY: redeploy-operator
-redeploy-operator: delete-operator manifests
-	kubectl apply -f $(MANIFESTPATHCRD)
-	kubectl apply -f $(MANIFESTPATHSTORAGE)
-	kubectl apply -f $(MANIFESTPATHDEPLOYMENT)
-	kubectl apply -f $(MANIFESTPATHDEPLOYMENTREPLICATION)
-	kubectl apply -f $(MANIFESTPATHBACKUP)
-	kubectl apply -f $(MANIFESTPATHTEST)
-	kubectl get pods
-
-## ArangoSync Tests
-
-$(ARANGOSYNCTESTCTRLBIN): $(GOBUILDDIR) $(SOURCES)
-	@mkdir -p $(BINDIR)
-	CGO_ENABLED=0 go build -installsuffix cgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o $(ARANGOSYNCTESTCTRLBIN) $(REPOPATH)/tests/sync
-
-.PHONY: check-sync-vars
-check-sync-vars:
-ifndef ARANGOSYNCSRCDIR
-	@echo ARANGOSYNCSRCDIR must point to the arangosync source directory
-	@exit 1
-endif
-ifndef ARANGODIMAGE
-	@echo ARANGODIMAGE must point to the usable arangodb enterprise image
-	@exit 1
-endif
-ifndef ENTERPRISELICENSE
-	@echo For tests using ArangoSync you most likely need the license key. Please set ENTERPRISELICENSE.
-	@exit 1
-endif
-	@echo Using ArangoSync source at $(ARANGOSYNCSRCDIR)
-	@echo Using ArangoDB image $(ARANGODIMAGE)
-
-.PHONY: docker-sync
-docker-sync: check-sync-vars
-	SYNCIMAGE=$(ARANGOSYNCIMAGE) TESTIMAGE=$(ARANGOSYNCTESTIMAGE) $(MAKE) -C $(ARANGOSYNCSRCDIR) docker docker-test
-
-.PHONY:
-docker-sync-test-ctrl: $(ARANGOSYNCTESTCTRLBIN)
-	docker build --quiet -f $(DOCKERARANGOSYNCCTRLFILE) -t $(ARANGOSYNCTESTCTRLIMAGE) .
-
-.PHONY:
-run-sync-tests: check-vars docker-sync docker-sync-test-ctrl prepare-run-tests
-ifdef PUSHIMAGES
-	docker push $(ARANGOSYNCTESTCTRLIMAGE)
-	docker push $(ARANGOSYNCTESTIMAGE)
-	docker push $(ARANGOSYNCIMAGE)
-endif
-	$(ROOTDIR)/scripts/kube_run_sync_tests.sh $(DEPLOYMENTNAMESPACE) '$(ARANGODIMAGE)' '$(ARANGOSYNCIMAGE)' '$(ARANGOSYNCTESTIMAGE)' '$(ARANGOSYNCTESTCTRLIMAGE)' '$(TESTOPTIONS)'
-
 .PHONY: tidy
 tidy:
 	@go mod tidy
@@ -592,7 +408,7 @@ tidy:
 
 deps-reload: tidy init
 
 .PHONY: init
-init: tools update-generated $(GHRELEASE) $(RELEASE) $(TESTBIN) $(BIN) vendor
+init: tools update-generated $(GHRELEASE) $(RELEASE) $(BIN) vendor
 
 .PHONY: tools
 tools: update-vendor
diff --git a/tests/acceptance/.gitignore b/tests/acceptance/.gitignore
deleted file mode 100644
index b3330c0de..000000000
--- a/tests/acceptance/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-cluster1.yaml
-cluster2.yaml
-generated
diff --git a/tests/acceptance/activefailover.template.yaml b/tests/acceptance/activefailover.template.yaml
deleted file mode 100644
index 32d8acea6..000000000
--- a/tests/acceptance/activefailover.template.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-activefailover"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: ActiveFailover
diff --git a/tests/acceptance/activefailover.yaml b/tests/acceptance/activefailover.yaml
deleted file mode 100644
index 84eb32ad8..000000000
--- a/tests/acceptance/activefailover.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-activefailover"
-spec:
-  mode: ActiveFailover
-  image: arangodb/arangodb:3.3.10
diff --git a/tests/acceptance/cluster-local-storage.template.yaml b/tests/acceptance/cluster-local-storage.template.yaml
deleted file mode 100644
index 1f6b9136c..000000000
--- a/tests/acceptance/cluster-local-storage.template.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  agents:
-    storageClassName: acceptance
-  dbservers:
-    storageClassName: acceptance
diff --git a/tests/acceptance/cluster-scale-1.template.yaml b/tests/acceptance/cluster-scale-1.template.yaml
deleted file mode 100644
index 04a93aecc..000000000
--- a/tests/acceptance/cluster-scale-1.template.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  dbservers:
-    count: 5
diff --git a/tests/acceptance/cluster-scale-2.template.yaml b/tests/acceptance/cluster-scale-2.template.yaml
deleted file mode 100644
index 3562664ec..000000000
--- a/tests/acceptance/cluster-scale-2.template.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  dbservers:
-    count: 5
-  coordinators:
-    count: 4
diff --git a/tests/acceptance/cluster-scale-3.template.yaml b/tests/acceptance/cluster-scale-3.template.yaml
deleted file mode 100644
index 3509d14b3..000000000
--- a/tests/acceptance/cluster-scale-3.template.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  dbservers:
-    count: 2
-  coordinators:
-    count: 4
diff --git a/tests/acceptance/cluster-scale-4.template.yaml b/tests/acceptance/cluster-scale-4.template.yaml
deleted file mode 100644
index f3679a411..000000000
--- a/tests/acceptance/cluster-scale-4.template.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  dbservers:
-    count: 2
-  coordinators:
-    count: 1
diff --git a/tests/acceptance/cluster-sync.template.yaml b/tests/acceptance/cluster-sync.template.yaml
deleted file mode 100644
index 0358ee95d..000000000
--- a/tests/acceptance/cluster-sync.template.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  sync:
-    enabled: true
-    externalAccess:
-      type: LoadBalancer
-    accessPackageSecretNames: ["src-accesspackage"]
diff --git a/tests/acceptance/cluster-sync.yaml b/tests/acceptance/cluster-sync.yaml
deleted file mode 100644
index 25cd357bb..000000000
--- a/tests/acceptance/cluster-sync.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  mode: Cluster
-  image:
-  sync:
-    enabled: true
diff --git a/tests/acceptance/cluster-sync2.template.yaml b/tests/acceptance/cluster-sync2.template.yaml
deleted file mode 100644
index bfeb39651..000000000
--- a/tests/acceptance/cluster-sync2.template.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster2"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  sync:
-    enabled: true
-    externalAccess:
-      type: LoadBalancer
diff --git a/tests/acceptance/cluster.template.yaml b/tests/acceptance/cluster.template.yaml
deleted file mode 100644
index 94ab5cc6e..000000000
--- a/tests/acceptance/cluster.template.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
diff --git a/tests/acceptance/cluster.yaml b/tests/acceptance/cluster.yaml
deleted file mode 100644
index ad0797765..000000000
--- a/tests/acceptance/cluster.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  mode: Cluster
-  image: arangodb/arangodb:3.3.10
diff --git a/tests/acceptance/cluster12-replication.yaml b/tests/acceptance/cluster12-replication.yaml
deleted file mode 100644
index df431e13c..000000000
--- a/tests/acceptance/cluster12-replication.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: "replication.database.arangodb.com/v1alpha"
-kind: "ArangoDeploymentReplication"
-metadata:
-  name: "cluster12-replication"
-spec:
-  source:
-    deploymentName: cluster1
-    auth:
-      keyfileSecretName: cluster1-to-2-auth
-    tls:
-      caSecretName: cluster1-to-2-ca
-  destination:
-    deploymentName: cluster2
diff --git a/tests/acceptance/generate.sh b/tests/acceptance/generate.sh
deleted file mode 100755
index b922a5909..000000000
--- a/tests/acceptance/generate.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-set -u
-
-version="arangodb-preview:3.4.0-rc.3"
-enterprise_secret="$ARANGO_EP_SECRET" #only the number
-community="arangodb/$version"
-enterprise="registry.arangodb.com/arangodb/$version-$enterprise_secret"
-community="neunhoef/arangodb:3.4"
-enterprise="neunhoef/arangodb:3.4"
-
-rm -fr generated
-mkdir -p generated
-
-for path in *.template.yaml; do
-    base_file="${path%.template.yaml}"
-    target="./generated/$base_file-community-dev.yaml"
-    cp "$path" "$target"
-    sed -i "s|@IMAGE@|$community|" "$target"
-    sed -i "s|@ENVIRONMENT@|Development|" "$target"
-    echo "created $target"
-done
-
-for path in *.template.yaml; do
-    base_file="${path%.template.yaml}"
-    target="./generated/$base_file-community-pro.yaml"
-    cp "$path" "$target"
-    sed -i "s|@IMAGE@|$community|" "$target"
-    sed -i "s|@ENVIRONMENT@|Production|" "$target"
-    echo "created $target"
-done
-
-for path in *.template.yaml; do
-    base_file="${path%.template.yaml}"
-    target="./generated/$base_file-enterprise-dev.yaml"
-    cp "$path" "$target"
-    sed -i "s|@IMAGE@|$enterprise|" "$target"
-    sed -i "s|@ENVIRONMENT@|Development|" "$target"
-    echo "created $target"
-done
-
-for path in *.template.yaml; do
-    base_file="${path%.template.yaml}"
-    target="./generated/$base_file-enterprise-pro.yaml"
-    cp "$path" "$target"
-    sed -i "s|@IMAGE@|$enterprise|" "$target"
-    sed -i "s|@ENVIRONMENT@|Production|" "$target"
-    echo "created $target"
-done
diff --git a/tests/acceptance/local-storage.template.yaml b/tests/acceptance/local-storage.template.yaml
deleted file mode 100644
index 569221d93..000000000
--- a/tests/acceptance/local-storage.template.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: "storage.arangodb.com/v1alpha"
-kind: "ArangoLocalStorage"
-metadata:
-  name: "acceptance-local-storage"
-spec:
-  storageClass:
-    name: acceptance
-  localPath:
-  - /var/lib/acceptance-test
diff --git a/tests/acceptance/local-storage.yaml b/tests/acceptance/local-storage.yaml
deleted file mode 100644
index 569221d93..000000000
--- a/tests/acceptance/local-storage.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: "storage.arangodb.com/v1alpha"
-kind: "ArangoLocalStorage"
-metadata:
-  name: "acceptance-local-storage"
-spec:
-  storageClass:
-    name: acceptance
-  localPath:
-  - /var/lib/acceptance-test
diff --git a/tests/acceptance/semiautomation/README.md b/tests/acceptance/semiautomation/README.md
deleted file mode 100644
index 3c2ddd7d6..000000000
--- a/tests/acceptance/semiautomation/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Semiautomation for the acceptance test
-
-This is a collection of tools to perform the acceptance test faster.
-
-## Prerequisites
-
-  - k8s cluster set up with `kubectl`
-  - `fish` shell installed
-  - `curl` installed
-  - Obi's generated templates in a subdirectory called `generated`
-
-## Usage
-
-Execute the tests like this:
-
-    ./test1a.fish
-
-and follow the instructions.
diff --git a/tests/acceptance/semiautomation/activefailover.yaml b/tests/acceptance/semiautomation/activefailover.yaml
deleted file mode 100644
index c32869a90..000000000
--- a/tests/acceptance/semiautomation/activefailover.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-activefailover"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  license:
-    secretName: arangodb-license-key
-  mode: ActiveFailover
diff --git a/tests/acceptance/semiautomation/cluster-local-storage.yaml b/tests/acceptance/semiautomation/cluster-local-storage.yaml
deleted file mode 100644
index ac728f37e..000000000
--- a/tests/acceptance/semiautomation/cluster-local-storage.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  license:
-    secretName: arangodb-license-key
-  agents:
-    storageClassName: acceptance
-  dbservers:
-    storageClassName: acceptance
diff --git a/tests/acceptance/semiautomation/cluster-sync.yaml b/tests/acceptance/semiautomation/cluster-sync.yaml
deleted file mode 100644
index 8f42e2ed9..000000000
--- a/tests/acceptance/semiautomation/cluster-sync.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  license:
-    secretName: arangodb-license-key
-  sync:
-    enabled: true
-    externalAccess:
-      type: LoadBalancer
-    accessPackageSecretNames: ["src-accesspackage"]
diff --git a/tests/acceptance/semiautomation/cluster-sync1.yaml b/tests/acceptance/semiautomation/cluster-sync1.yaml
deleted file mode 100644
index b587fa641..000000000
--- a/tests/acceptance/semiautomation/cluster-sync1.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster1"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  license:
-    secretName: arangodb-license-key
-  sync:
-    enabled: true
-    externalAccess:
-      type: LoadBalancer
-      accessPackageSecretNames: ["src-accesspackage"]
-      masterEndpoint: ["https://src-sync.9hoeffer.de:8629"]
diff --git a/tests/acceptance/semiautomation/cluster-sync2.yaml b/tests/acceptance/semiautomation/cluster-sync2.yaml
deleted file mode 100644
index 7472c9f86..000000000
--- a/tests/acceptance/semiautomation/cluster-sync2.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster2"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  mode: Cluster
-  license:
-    secretName: arangodb-license-key
-  sync:
-    enabled: true
-    externalAccess:
-      type: LoadBalancer
diff --git a/tests/acceptance/semiautomation/cluster.yaml b/tests/acceptance/semiautomation/cluster.yaml
deleted file mode 100644
index 9c43617fe..000000000
--- a/tests/acceptance/semiautomation/cluster.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-cluster"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  license:
-    secretName: arangodb-license-key
-  mode: Cluster
diff --git a/tests/acceptance/semiautomation/helper.fish b/tests/acceptance/semiautomation/helper.fish
deleted file mode 100644
index 17872c06b..000000000
--- a/tests/acceptance/semiautomation/helper.fish
+++ /dev/null
@@ -1,136 +0,0 @@
-function printheader
-    echo "Test : $TESTNAME"
-    echo "Description : $TESTDESC"
-    echo "Yaml file : $YAMLFILE"
-    echo "Deployment name : $DEPLOYMENT"
-    echo
-end
-
-function waitForKubectl
-    if test (count $argv) -lt 5
-        return 1
-    end
-    set -l op (string split -- " " $argv[1])
-    set -l select $argv[2]
-    set -l good (string split -- ";" "$argv[3]")
-    set -l expected $argv[4]
-    set -l timeout (math "$argv[5]" \* "$TIMEOUT")
-
-    echo
-    echo "Testing `kubectl $op`"
-    echo " for occurrences of `$select`"
-    echo " that are `$good`, expecting `$expected`"
-    echo
-
-    set -l t 0
-    while true
-        set -l l (kubectl $op | grep $select)
-        set -l nfound (count $l)
-        set -l ngood 0
-        for line in $l
-            if string match -r $good $line > /dev/null
-                set ngood (math $ngood + 1)
-            end
-        end
-        echo -n "Good=$ngood, found=$nfound, expected=$expected, try $t ($timeout)"
-        echo -n -e "\r"
-        if test $ngood -eq $expected -a $nfound -eq $expected ; echo ; return 0 ; end
-        if test $t -gt $timeout ; echo ; echo Timeout ; return 2 ; end
-        set t (math $t + 1)
-        sleep 1
-    end
-end
-
-function output
-    if test -n "$SAY"
-        eval $SAY $argv[1] > /dev/null ^ /dev/null
-    end
-    echo
-    for l in $argv[2..-1] ; echo $l ; end
-end
-
-function ensureLicenseKey
-    if test -z "$ARANGO_LICENSE_KEY"
-        echo "Need ARANGO_LICENSE_KEY for enterprise image."
-        exit 1
-    end
-    kubectl get secret arangodb-license-key ; or kubectl create secret generic arangodb-license-key \
-        --from-literal=token="$ARANGO_LICENSE_KEY" > /dev/null
-end
-
-function log
-    echo "$argv[1] Test: $TESTNAME, Desc: $TESTDESC" >> testprotocol.log
-end
-
-function inputAndLogResult
-    read -P "Test result: " result
-    log $result
-    echo
-end
-
-function waitForUser
-    read -P "Hit enter to continue"
-end
-
-function getLoadBalancerIP
-    set var (kubectl get service $argv[1] -o=json | jq .status.loadBalancer.ingress[0])
-    set key (echo $var | jq -r keys[0])
-    echo $var | jq -r .$key
-end
-
-function testArangoDB
-    set -l ip $argv[1]
-    set -l timeout (math "$argv[2]" \* "$TIMEOUT")
-    set -l n 0
-    echo Waiting for ArangoDB to be ready...
-    while true
-        if set v (curl -k -s -m 3 "https://$ip:8529/_api/version" --user root: | jq .server)
-            if test "$v" = '"arango"' ; return 0 ; end
-        end
-        set n (math $n + 1)
-        if test "$n" -gt "$timeout"
-            echo Timeout
-            return 1
-        end
-        echo Waiting "$n($timeout)"...
-        sleep 1
-    end
-end
-
-function fail
-    output "Failed" $argv
-    exit 1
-end
-
-function patchYamlFile
-    set -l YAMLFILE $argv[1]
-    set -l IMAGE $argv[2]
-    set -l ENVIRONMENT $argv[3]
-    set -l RESULT $argv[4]
-    cp "$YAMLFILE" "$RESULT"
-    sed -i "s|@IMAGE@|$IMAGE|" "$RESULT"
-    sed -i "s|@ENVIRONMENT@|$ENVIRONMENT|" "$RESULT"
-    if test -z "$DISABLEIPV6"
-        sed -i "s|@DISABLEIPV6@|false|" "$RESULT"
-    else
-        sed -i "s|@DISABLEIPV6@|true|" "$RESULT"
-    end
-end
-
-function checkImages
-    if test -z "$ARANGODB_COMMUNITY" -o -z "$ARANGODB_ENTERPRISE"
-        echo "Need ARANGODB_COMMUNITY and ARANGODB_ENTERPRISE."
-        exit 1
-    end
-end
-
-if test -z "$TIMEOUT"
-    set -xg TIMEOUT 60
-end
-
-if test -z "$SAY"
-    if which say > /dev/null
-        set -xg SAY say
-    end
-end
-
diff --git a/tests/acceptance/semiautomation/local-storage.yaml b/tests/acceptance/semiautomation/local-storage.yaml
deleted file mode 100644
index 569221d93..000000000
--- a/tests/acceptance/semiautomation/local-storage.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: "storage.arangodb.com/v1alpha"
-kind: "ArangoLocalStorage"
-metadata:
-  name: "acceptance-local-storage"
-spec:
-  storageClass:
-    name: acceptance
-  localPath:
-  - /var/lib/acceptance-test
diff --git a/tests/acceptance/semiautomation/replication.yaml b/tests/acceptance/semiautomation/replication.yaml
deleted file mode 100644
index 8ee60f328..000000000
--- a/tests/acceptance/semiautomation/replication.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: "replication.database.arangodb.com/v1alpha"
-kind: "ArangoDeploymentReplication"
-metadata:
-  name: "replication-internal"
-spec:
-  source:
-    masterEndpoint: ["https://src-sync.9hoeffer.de:8629"]
-    auth:
-      keyfileSecretName: src-accesspackage-auth
-    tls:
-      caSecretName: src-accesspackage-ca
-  destination:
-    deploymentName: "acceptance-cluster2"
diff --git a/tests/acceptance/semiautomation/single.yaml b/tests/acceptance/semiautomation/single.yaml
deleted file mode 100644
index 62115f8fc..000000000
--- a/tests/acceptance/semiautomation/single.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: "database.arangodb.com/v1alpha"
-kind: "ArangoDeployment"
-metadata:
-  name: "acceptance-single"
-spec:
-  environment: @ENVIRONMENT@
-  image: @IMAGE@
-  disableIPv6: @DISABLEIPV6@
-  externalAccess:
-    type: LoadBalancer
-  license:
-    secretName: arangodb-license-key
-  mode: Single
diff --git a/tests/acceptance/semiautomation/test1a.fish b/tests/acceptance/semiautomation/test1a.fish
deleted file mode 100755
index 80da9c55b..000000000
--- a/tests/acceptance/semiautomation/test1a.fish
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test1a
-set -g TESTDESC "Deployment of mode single (development)"
-set -g YAMLFILE single.yaml
-set -g DEPLOYMENT acceptance-single
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Development work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 2
-or fail "Could not delete deployment."
- -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1b.fish b/tests/acceptance/semiautomation/test1b.fish deleted file mode 100755 index 44abd6bfa..000000000 --- a/tests/acceptance/semiautomation/test1b.fish +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test1b -set -g TESTDESC "Deployment of mode active/failover (development)" -set -g YAMLFILE activefailover.yaml -set -g DEPLOYMENT acceptance-activefailover -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Development work.yaml - -# Ensure enterprise license key -ensureLicenseKey - - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1c.fish b/tests/acceptance/semiautomation/test1c.fish deleted file mode 100755 index 356c41337..000000000 --- a/tests/acceptance/semiautomation/test1c.fish +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test1c -set -g TESTDESC "Deployment of mode cluster (development, enterprise)" -set -g YAMLFILE cluster.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." 
- -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test1d.fish b/tests/acceptance/semiautomation/test1d.fish deleted file mode 100755 index 8cc87807d..000000000 --- a/tests/acceptance/semiautomation/test1d.fish +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test1d -set -g TESTDESC "Deployment of mode cluster with sync (development, enterprise)" -set -g YAMLFILE cluster-sync.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 15 2 -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test2a.fish b/tests/acceptance/semiautomation/test2a.fish deleted file mode 100755 index 608289c5c..000000000 --- a/tests/acceptance/semiautomation/test2a.fish +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test2a -set -g TESTDESC "Scale an active failover deployment (enterprise, development)" -set -g YAMLFILE activefailover.yaml -set -g DEPLOYMENT acceptance-activefailover -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Scale up the deployment -output "Next" "Patching Spec for Scaling up" -kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/single/count", "value":3}]' -and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 6 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 2 2 -or fail "Patched deployment did not get ready." 
-
-# Scale down the deployment
-output "Next" "Patching Spec for Scaling down"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/single/count", "value":2}]'
-and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2
-and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2
-and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2
-or fail "Patched deployment did not get ready."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 2
-or fail "Could not delete deployment."
-
-output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test2b.fish b/tests/acceptance/semiautomation/test2b.fish
deleted file mode 100755
index 58d509ffa..000000000
--- a/tests/acceptance/semiautomation/test2b.fish
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test2b
-set -g TESTDESC "Scale a cluster deployment (development, enterprise)"
-set -g YAMLFILE cluster.yaml
-set -g DEPLOYMENT acceptance-cluster
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
-
-# Patching
-output "Scaling db servers up" "Patching Spec for Scaling up DBservers"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":5}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 11 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-or fail "Deployment did not get ready."
-
-# Patching
-output "Scaling coordinators up" "Patching Spec for Scaling up coordinators"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":4}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 12 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 5 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 2
-or fail "Deployment did not get ready."
-
-# Patching
-output "Scaling dbservers down" "Patching Spec for Scaling down dbservers"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 4 2
-or fail "Deployment did not get ready."
-
-# Patching
-output "Scaling coordinators down" "Patching Spec for Scaling down coordinators"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":1}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 6 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 1 2
-or fail "Deployment did not get ready."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 0 2
-or fail "Could not delete deployment."
-
-output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test3a.fish b/tests/acceptance/semiautomation/test3a.fish
deleted file mode 100755
index e51b0cbff..000000000
--- a/tests/acceptance/semiautomation/test3a.fish
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test3a
-set -g TESTDESC "Deployment of mode single (production)"
-set -g YAMLFILE single.yaml
-set -g DEPLOYMENT acceptance-single
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 2
-or fail "Could not delete deployment."
-
-output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test3b.fish b/tests/acceptance/semiautomation/test3b.fish
deleted file mode 100755
index 9605fc867..000000000
--- a/tests/acceptance/semiautomation/test3b.fish
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test3b
-set -g TESTDESC "Deployment of mode active/failover (production)"
-set -g YAMLFILE activefailover.yaml
-set -g DEPLOYMENT acceptance-activefailover
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2
-and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2
-and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-waitForKubectl "get pod" $DEPLOYMENT "" 0 2
-or fail "Could not delete deployment."
-
-output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test3c.fish b/tests/acceptance/semiautomation/test3c.fish
deleted file mode 100755
index 3ad1e2938..000000000
--- a/tests/acceptance/semiautomation/test3c.fish
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test3c
-set -g TESTDESC "Deployment of mode cluster (production, enterprise)"
-set -g YAMLFILE cluster.yaml
-set -g DEPLOYMENT acceptance-cluster
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-waitForKubectl "get pod" $DEPLOYMENT "" 0 2
-or fail "Could not delete deployment."
-
-output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test3d.fish b/tests/acceptance/semiautomation/test3d.fish
deleted file mode 100755
index 062f3520e..000000000
--- a/tests/acceptance/semiautomation/test3d.fish
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test3d
-set -g TESTDESC "Scale a cluster deployment (production, enterprise)"
-set -g YAMLFILE cluster.yaml
-set -g DEPLOYMENT acceptance-cluster
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
-
-# Patching
-output "Scaling dbservers down" "Patching Spec for Scaling down dbservers"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":2}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 8 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-or fail "Deployment did not get ready."
-
-# Patching
-output "Scaling coordinators down" "Patching Spec for Scaling down coordinators"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":2}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 7 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 2 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 2 2
-or fail "Deployment did not get ready."
-
-# Patching
-output "Scaling db servers up" "Patching Spec for Scaling up DBservers"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/dbservers/count", "value":3}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 8 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 2 2
-or fail "Deployment did not get ready."
-
-# Patching
-output "Scaling coordinators up" "Patching Spec for Scaling up coordinators"
-kubectl patch arango $DEPLOYMENT --type='json' -p='[{"op": "replace", "path": "/spec/coordinators/count", "value":3}]'
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 9 2
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-or fail "Deployment did not get ready."
-
-# Manual check
-output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER."
-inputAndLogResult
-
-# Cleanup
-kubectl delete -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 0 2
-or fail "Could not delete deployment."
-
-output "Ready" ""
diff --git a/tests/acceptance/semiautomation/test4a.fish b/tests/acceptance/semiautomation/test4a.fish
deleted file mode 100755
index 98f2042af..000000000
--- a/tests/acceptance/semiautomation/test4a.fish
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/fish
-
-source helper.fish
-checkImages
-
-set -g TESTNAME test4a
-set -g TESTDESC "Deployment of mode cluster (development, enterprise, local storage)"
-set -g YAMLFILE cluster-local-storage.yaml
-set -g YAMLFILESTORAGE local-storage.yaml
-set -g DEPLOYMENT acceptance-cluster
-printheader
-
-patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml
-
-# Ensure enterprise license key
-ensureLicenseKey
-
-# Deploy local storage:
-kubectl apply -f $YAMLFILESTORAGE
-and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 1
-or fail "Local storage could not be deployed."
-
-# Deploy and check
-kubectl apply -f work.yaml
-and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2
-and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2
-and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2
-and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3
-and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *acceptance" 6 2
-or fail "Deployment did not get ready."
-
-# Automatic check
-set ip (getLoadBalancerIP "$DEPLOYMENT-ea")
-testArangoDB $ip 2
-or fail "ArangoDB was not reachable."
- -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -kubectl delete -f $YAMLFILESTORAGE -kubectl delete storageclass acceptance -waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 2 -or fail "Could not delete deployed storageclass." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test4b.fish b/tests/acceptance/semiautomation/test4b.fish deleted file mode 100755 index 116568bb5..000000000 --- a/tests/acceptance/semiautomation/test4b.fish +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test4b -set -g TESTDESC "Deployment of mode cluster (development, enterprise, local storage)" -set -g YAMLFILE cluster.yaml -set -g YAMLFILESTORAGE local-storage.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy local storage: -kubectl apply -f $YAMLFILESTORAGE -and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 1 -or fail "Local storage could not be deployed." - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *standard" 6 2 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -kubectl delete -f $YAMLFILESTORAGE -kubectl delete storageclass acceptance -waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 2 -or fail "Could not delete deployed storageclass." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5a.fish b/tests/acceptance/semiautomation/test5a.fish deleted file mode 100755 index 2d451cb0d..000000000 --- a/tests/acceptance/semiautomation/test5a.fish +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test5a -set -g TESTDESC "Pod resilience in mode single (production)" -set -g YAMLFILE single.yaml -set -g DEPLOYMENT acceptance-single -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." 
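-# The "-v;pending" selector in the LoadBalancer check above presumably inverts the match (grep -v style), i.e. it waits until the service is no longer <pending> and has been assigned an external IP.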
- -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and kill the single server pod." "Wait until it comes back and then see if the data is still there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5b.fish b/tests/acceptance/semiautomation/test5b.fish deleted file mode 100755 index 8fcbf247f..000000000 --- a/tests/acceptance/semiautomation/test5b.fish +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test5b -set -g TESTDESC "Pod resilience in active/failover (production)" -set -g YAMLFILE activefailover.yaml -set -g DEPLOYMENT acceptance-activefailover -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, kill one single server pod after another." "They should come back, service should continue." "All data must still be there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test5c.fish b/tests/acceptance/semiautomation/test5c.fish deleted file mode 100755 index 5843afee2..000000000 --- a/tests/acceptance/semiautomation/test5c.fish +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test5c -set -g TESTDESC "Pod resilience in mode cluster (production, enterprise)" -set -g YAMLFILE cluster.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." 
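-# Pod name suffixes used in these checks map to server roles: -prmr = DB server (primary), -agnt = agency member, -crdn = coordinator, -sngl = single server.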
- -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, kill one pod after another with enough time in between." "They should come back, service should continue." "All data must still be there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6a.fish b/tests/acceptance/semiautomation/test6a.fish deleted file mode 100755 index be901578d..000000000 --- a/tests/acceptance/semiautomation/test6a.fish +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6a -set -g TESTDESC "Node resilience in mode single (production)" -set -g YAMLFILE single.yaml -set -g DEPLOYMENT acceptance-single -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and reboot the node the single pod is running on." "Wait until it comes back and then see if the data is still there and the server is responsive." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6b.fish b/tests/acceptance/semiautomation/test6b.fish deleted file mode 100755 index 408320d4e..000000000 --- a/tests/acceptance/semiautomation/test6b.fish +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6b -set -g TESTDESC "Node resilience in active/failover (production)" -set -g YAMLFILE activefailover.yaml -set -g DEPLOYMENT acceptance-activefailover -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, reboot the node on which the ready single server pod resides." "The node and pod should come back, service should be uninterrupted."
"All data must still be there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6c.fish b/tests/acceptance/semiautomation/test6c.fish deleted file mode 100755 index ac46559b0..000000000 --- a/tests/acceptance/semiautomation/test6c.fish +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6c -set -g TESTDESC "Node resilience in mode cluster (production, enterprise)" -set -g YAMLFILE cluster.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, reboot nodes one after another with enough time in between." "They should come back, service should not be interrupted." "Even writes should be possible during the restart." "All data must still be there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6d.fish b/tests/acceptance/semiautomation/test6d.fish deleted file mode 100755 index f5399f665..000000000 --- a/tests/acceptance/semiautomation/test6d.fish +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6d -set -g TESTDESC "Node resilience in mode single (production)" -set -g YAMLFILE single.yaml -set -g DEPLOYMENT acceptance-single -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-sngl" "1/1 *Running" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in and remove the node the single pod is running on." "Wait until a replacement is back." "This can only work with network attached storage." "Then see if the data is still there and the new server is responsive." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT-sngl "" 0 2 -or fail "Could not delete deployment." 
- -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6e.fish b/tests/acceptance/semiautomation/test6e.fish deleted file mode 100755 index ca9535b7d..000000000 --- a/tests/acceptance/semiautomation/test6e.fish +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6e -set -g TESTDESC "Node resilience in active/failover (production)" -set -g YAMLFILE activefailover.yaml -set -g DEPLOYMENT acceptance-activefailover -printheader - -patchYamlFile $YAMLFILE $ARANGODB_COMMUNITY Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" $DEPLOYMENT "1 *Running" 5 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*1/1 *Running" "" 1 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sngl.*0/1 *Running" "" 1 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in." "Then, remove the node on which the ready single server pod resides." "The node and pod should come back (on a different machine)." "The service should be uninterrupted." "All data must still be there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6f.fish b/tests/acceptance/semiautomation/test6f.fish deleted file mode 100755 index 71d8ab12c..000000000 --- a/tests/acceptance/semiautomation/test6f.fish +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6f -set -g TESTDESC "Node resilience in mode cluster (production, enterprise)" -set -g YAMLFILE cluster.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." "Furthermore, put some data in with replication factor 2." "Then, remove a node." "Pods should come back, service should not be interrupted." "Even writes should be possible during the redeployment." "All data must still be there." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." 
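-# An expected count of 0 in the cleanup check above means "wait until no matching pods remain", i.e. the operator has fully torn down the deployment before the test finishes.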
- -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test6g.fish b/tests/acceptance/semiautomation/test6g.fish deleted file mode 100755 index 3b0e50d6f..000000000 --- a/tests/acceptance/semiautomation/test6g.fish +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test6g -set -g TESTDESC "Node resilience in mode cluster (development, enterprise, local storage)" -set -g YAMLFILE cluster-local-storage.yaml -set -g YAMLFILESTORAGE local-storage.yaml -set -g DEPLOYMENT acceptance-cluster -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Development work.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy local storage: -kubectl apply -f $YAMLFILESTORAGE -and waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 1 1 -or fail "Local storage could not be deployed." - -# Deploy and check -kubectl apply -f work.yaml -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -and waitForKubectl "get pvc" "$DEPLOYMENT" "RWO *acceptance" 6 2 -or fail "Deployment did not get ready." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB was not reachable." - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then put some data in there in different collections, some with" "replicationFactor set to 1 and some set to 2." "Then cordon off a node running a dbserver pod and delete the pod." "Service (including writes) must continue, except for the collection without" "replication. It should be possible to drop that collection and eventually" "remove the dbserver. A new dbserver should come up on a different node" "after some time." -inputAndLogResult - -# Cleanup -kubectl delete -f work.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -or fail "Could not delete deployment." - -kubectl delete -f $YAMLFILESTORAGE -kubectl delete storageclass acceptance -waitForKubectl "get storageclass" "acceptance.*arangodb.*localstorage" "" 0 2 -or fail "Could not delete deployed storageclass." 
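-# The local-storage variants additionally deploy an ArangoLocalStorage resource, so teardown must also remove the generated "acceptance" storage class, not just the deployment itself.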
- -output "Ready" "" diff --git a/tests/acceptance/semiautomation/test7a.fish b/tests/acceptance/semiautomation/test7a.fish deleted file mode 100755 index 880d70d53..000000000 --- a/tests/acceptance/semiautomation/test7a.fish +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/fish - -source helper.fish -checkImages - -set -g TESTNAME test7a -set -g TESTDESC "Deployment of 2 clusters with sync with DC2DC (production, enterprise)" -set -g YAMLFILE cluster-sync1.yaml -set -g YAMLFILE2 cluster-sync2.yaml -set -g DEPLOYMENT acceptance-cluster1 -set -g DEPLOYMENT2 acceptance-cluster2 -printheader - -patchYamlFile $YAMLFILE $ARANGODB_ENTERPRISE Production work.yaml -patchYamlFile $YAMLFILE2 $ARANGODB_ENTERPRISE Production work2.yaml -cp replication.yaml work3.yaml - -# Ensure enterprise license key -ensureLicenseKey - -# Deploy and check -kubectl apply -f work.yaml -kubectl apply -f work2.yaml -and waitForKubectl "get pod" "$DEPLOYMENT" "1/1 *Running" 15 2 -and waitForKubectl "get pod" "$DEPLOYMENT-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-syma" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT-sywo" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT-ea *LoadBalancer" "-v;pending" 1 3 -and waitForKubectl "get service" "$DEPLOYMENT-sync *LoadBalancer" "-v;pending" 1 3 -and waitForKubectl "get pod" "$DEPLOYMENT2" "1/1 *Running" 15 2 -and waitForKubectl "get pod" "$DEPLOYMENT2-prmr" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT2-agnt" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT2-crdn" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT2-syma" "1/1 *Running" 3 2 -and waitForKubectl "get pod" "$DEPLOYMENT2-sywo" "1/1 *Running" 3 2 -and waitForKubectl "get service" "$DEPLOYMENT2 *ClusterIP" 8529 1 2 -and waitForKubectl "get service" "$DEPLOYMENT2-ea *LoadBalancer" "-v;pending" 1 3 -and waitForKubectl "get service" "$DEPLOYMENT2-sync *LoadBalancer" "-v;pending" 1 3 -or fail "Deployment did not get ready." - -# Deploy secrets separately for sync to pick them up: -kubectl get secret src-accesspackage --template='{{index .data "accessPackage.yaml"}}' | base64 -d > accessPackage.yaml -and kubectl apply -f accessPackage.yaml -or fail "Could not redeploy secrets for replication auth." - -# Automatic check -set ip (getLoadBalancerIP "$DEPLOYMENT-ea") -testArangoDB $ip 2 -or fail "ArangoDB (1) was not reachable." - -set ip2 (getLoadBalancerIP "$DEPLOYMENT2-ea") -testArangoDB $ip2 2 -or fail "ArangoDB (2) was not reachable." - -set ip3 (getLoadBalancerIP "$DEPLOYMENT-sync") -sed -i "s|@ADDRESS@|$ip3|" work3.yaml - -# Set up replication, rest is manual: -# run sed here on replication.yaml, find sync-ea first -kubectl apply -f work3.yaml - -# Manual check -output "Work" "Now please check external access on this URL with your browser:" " https://$ip:8529/" "then type the outcome followed by ENTER." -inputAndLogResult - -# Cleanup -kubectl delete -f work3.yaml -sleep 15 -kubectl delete -f work.yaml -kubectl delete -f work2.yaml -waitForKubectl "get pod" $DEPLOYMENT "" 0 2 -waitForKubectl "get pod" $DEPLOYMENT2 "" 0 2 -or fail "Could not delete deployment." 
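-# Teardown order matters for DC2DC: the replication resource (work3.yaml) is removed first and given 15 seconds to wind down before the two clusters themselves are deleted.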
- -output "Ready" "" diff --git a/tests/acceptance/single.template.yaml b/tests/acceptance/single.template.yaml deleted file mode 100644 index 58b5a6b74..000000000 --- a/tests/acceptance/single.template.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "acceptance-single" -spec: - environment: @ENVIRONMENT@ - image: @IMAGE@ - externalAccess: - type: LoadBalancer - mode: Single diff --git a/tests/acceptance/single.yaml b/tests/acceptance/single.yaml deleted file mode 100644 index fcedd778a..000000000 --- a/tests/acceptance/single.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: "database.arangodb.com/v1alpha" -kind: "ArangoDeployment" -metadata: - name: "acceptance-single" -spec: - mode: Single - image: arangodb/arangodb:3.3.10 diff --git a/tests/annotations_test.go b/tests/annotations_test.go deleted file mode 100644 index 3d575ccf7..000000000 --- a/tests/annotations_test.go +++ /dev/null @@ -1,342 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Adam Janikowski -// - -package tests - -import ( - "context" - "github.com/arangodb/kube-arangodb/pkg/util/collection" - "testing" - "time" - - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/dchest/uniuri" - "github.com/rs/zerolog/log" - "github.com/stretchr/testify/require" - core "k8s.io/api/core/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" -) - -func addAnnotation(t *testing.T, kubeClient kubernetes.Interface, arangoClient versioned.Interface, depl *api.ArangoDeployment, annotations map[string]string) { - object, err := arangoClient.DatabaseV1().ArangoDeployments(depl.GetNamespace()).Get(context.Background(), depl.GetName(), meta.GetOptions{}) - require.NoError(t, err) - - object.Spec.Annotations = annotations - object.Spec.Coordinators.Annotations = depl.Spec.Coordinators.Annotations - - _, err = arangoClient.DatabaseV1().ArangoDeployments(depl.GetNamespace()).Update(context.Background(), object, meta.UpdateOptions{}) - require.NoError(t, err) - - ensureAnnotations(t, kubeClient, object) -} - -func ensureAnnotationsTimeout(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) func() error { - return func() error { - if err := ensureSecretAnnotations(t, client, depl); err == nil || !isInterrupt(err) { - return err - } - - if err := ensurePDBAnnotation(t, client, depl); err == nil || !isInterrupt(err) { - return err - } - - if err := ensurePVCAnnotation(t, client, depl); err == nil || !isInterrupt(err) { - return err - } - - if err := ensureServiceAnnotation(t, client, depl); err == nil || 
!isInterrupt(err) { - return err - } - - if err := ensureServiceAccountAnnotation(t, client, depl); err == nil || !isInterrupt(err) { - return err - } - - if err := ensurePodAnnotations(t, client, depl); err == nil || !isInterrupt(err) { - return err - } - - return interrupt{} - } -} - -func ensureSecretAnnotations(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) error { - secrets, err := k8sutil.GetSecretsForParent(client.CoreV1().Secrets(depl.Namespace), deployment.ArangoDeploymentResourceKind, depl.Name, depl.Namespace) - require.NoError(t, err) - require.True(t, len(secrets) > 0) - for _, secret := range secrets { - if !collection.Compare(secret.GetAnnotations(), depl.Spec.Annotations) { - log.Info().Msgf("Annotations for Secret does not match on %s", secret.Name) - return nil - } - } - - return interrupt{} -} - -func getPodGroup(pod *core.Pod) api.ServerGroup { - if pod.Labels == nil { - return api.ServerGroupUnknown - } - - return api.ServerGroupFromRole(pod.Labels[k8sutil.LabelKeyRole]) -} - -func ensurePodAnnotations(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) error { - pods, err := k8sutil.GetPodsForParent(client.CoreV1().Pods(depl.Namespace), deployment.ArangoDeploymentResourceKind, depl.Name, depl.Namespace) - require.NoError(t, err) - require.True(t, len(pods) > 0) - for _, pod := range pods { - group := getPodGroup(pod) - combinedAnnotations := collection.MergeAnnotations(depl.Spec.Annotations, depl.Spec.GetServerGroupSpec(group).Annotations) - if !collection.Compare(pod.GetAnnotations(), combinedAnnotations) { - log.Info().Msgf("Annotations for Pod does not match on %s", pod.Name) - return nil - } - } - - return interrupt{} -} - -func ensurePDBAnnotation(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) error { - podDisruptionBudgets, err := k8sutil.GetPDBForParent(client.PolicyV1beta1().PodDisruptionBudgets(depl.Namespace), deployment.ArangoDeploymentResourceKind, depl.Name, depl.Namespace) - require.NoError(t, err) - require.True(t, len(podDisruptionBudgets) > 0) - for _, podDisruptionBudget := range podDisruptionBudgets { - if !collection.Compare(podDisruptionBudget.GetAnnotations(), depl.Spec.Annotations) { - log.Info().Msgf("Annotations for PDB does not match on %s", podDisruptionBudget.Name) - return nil - } - } - - return interrupt{} -} - -func ensurePVCAnnotation(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) error { - persistentVolumeClaims, err := k8sutil.GetPVCForParent(client.CoreV1().PersistentVolumeClaims(depl.Namespace), deployment.ArangoDeploymentResourceKind, depl.Name, depl.Namespace) - require.NoError(t, err) - require.True(t, len(persistentVolumeClaims) > 0) - for _, persistentVolumeClaim := range persistentVolumeClaims { - if !collection.Compare(persistentVolumeClaim.GetAnnotations(), depl.Spec.Annotations) { - log.Info().Msgf("Annotations for PVC does not match on %s", persistentVolumeClaim.Name) - return nil - } - } - - return interrupt{} -} - -func ensureServiceAnnotation(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) error { - services, err := k8sutil.GetServicesForParent(client.CoreV1().Services(depl.Namespace), deployment.ArangoDeploymentResourceKind, depl.Name, depl.Namespace) - require.NoError(t, err) - require.True(t, len(services) > 0) - for _, service := range services { - if !collection.Compare(service.GetAnnotations(), depl.Spec.Annotations) { - log.Info().Msgf("Annotations for Service does not match on %s", service.Name) - 
return nil - } - } - - return interrupt{} -} - -func ensureServiceAccountAnnotation(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) error { - serviceAccounts, err := k8sutil.GetServiceAccountsForParent(client.CoreV1().ServiceAccounts(depl.Namespace), deployment.ArangoDeploymentResourceKind, depl.Name, depl.Namespace) - require.NoError(t, err) - for _, serviceAccount := range serviceAccounts { - if !collection.Compare(serviceAccount.GetAnnotations(), depl.Spec.Annotations) { - log.Info().Msgf("Annotations for Service Account does not match on %s", serviceAccount.Name) - return nil - } - } - - return interrupt{} -} - -func ensureAnnotations(t *testing.T, client kubernetes.Interface, depl *api.ArangoDeployment) { - if err := timeout(2*time.Second, 5*time.Minute, ensureAnnotationsTimeout(t, client, depl)); err != nil { - require.NoError(t, err) - } -} - -func TestAnnotations(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-annotations-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Environment = api.NewEnvironment(api.EnvironmentProduction) - depl.Spec.SetDefaults(depl.GetName()) - - // Create deployment - depl, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, meta.CreateOptions{}) - require.NoError(t, err) - - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - t.Run("Add annotation", func(t *testing.T) { - annotations := map[string]string{ - "annotation": uniuri.NewLen(8), - } - - addAnnotation(t, kubecli, c, depl, annotations) - - addAnnotation(t, kubecli, c, depl, nil) - }) - - t.Run("Add kubernetes annotation", func(t *testing.T) { - key := "kubernetes.io/test-only-annotation" - - annotations := map[string]string{ - key: uniuri.NewLen(8), - "annotation": uniuri.NewLen(8), - } - - addAnnotation(t, kubecli, c, depl, annotations) - - addAnnotation(t, kubecli, c, depl, nil) - - secrets, err := k8sutil.GetSecretsForParent(kubecli.CoreV1().Secrets(depl.Namespace), - deployment.ArangoDeploymentResourceKind, - depl.Name, - depl.Namespace) - require.NoError(t, err) - require.True(t, len(secrets) > 0) - - for _, secret := range secrets { - require.NotNil(t, secret.Annotations) - - _, ok := secret.Annotations[key] - - require.True(t, ok) - } - }) - - t.Run("Add arangodb annotation", func(t *testing.T) { - key := "arangodb.com/test-only-annotation" - - annotations := map[string]string{ - key: uniuri.NewLen(8), - "annotation": uniuri.NewLen(8), - } - - addAnnotation(t, kubecli, c, depl, annotations) - - addAnnotation(t, kubecli, c, depl, nil) - - secrets, err := k8sutil.GetSecretsForParent(kubecli.CoreV1().Secrets(depl.Namespace), - deployment.ArangoDeploymentResourceKind, - depl.Name, - depl.Namespace) - require.NoError(t, err) - require.True(t, len(secrets) > 0) - - for _, secret := range secrets { - require.NotNil(t, secret.Annotations) - - _, ok := secret.Annotations[key] - - require.True(t, ok) - } - }) - - t.Run("Replace annotation", func(t *testing.T) { - annotations := map[string]string{ - "annotation": uniuri.NewLen(8), - } - - addAnnotation(t, kubecli, c, depl, annotations) - - annotations["annotation"] = uniuri.NewLen(16) - - addAnnotation(t, kubecli, c, depl, 
annotations) - - addAnnotation(t, kubecli, c, depl, nil) - }) - - t.Run("Add annotations", func(t *testing.T) { - annotations := map[string]string{ - "annotation": uniuri.NewLen(8), - "annotation2": uniuri.NewLen(16), - } - - addAnnotation(t, kubecli, c, depl, annotations) - - addAnnotation(t, kubecli, c, depl, nil) - }) - - t.Run("Add annotations for group", func(t *testing.T) { - annotations := map[string]string{ - "annotation": uniuri.NewLen(8), - "annotation2": uniuri.NewLen(16), - } - - depl.Spec.Coordinators.Annotations = map[string]string{ - "coordinator-only": uniuri.NewLen(32), - "annotation": uniuri.NewLen(8), - } - - addAnnotation(t, kubecli, c, depl, annotations) - - pods, err := k8sutil.GetPodsForParent(kubecli.CoreV1().Pods(depl.Namespace), - deployment.ArangoDeploymentResourceKind, - depl.Name, - depl.Namespace) - require.NoError(t, err) - require.True(t, len(pods) > 0) - - for _, pod := range pods { - require.NotNil(t, pod.Annotations) - - value, ok := pod.Annotations["annotation"] - _, coordOnly := pod.Annotations["coordinator-only"] - - require.True(t, ok) - - if getPodGroup(pod) == api.ServerGroupCoordinators { - require.Equal(t, depl.Spec.Coordinators.Annotations["annotation"], value) - require.True(t, coordOnly) - } else { - require.Equal(t, annotations["annotation"], value) - require.False(t, coordOnly) - } - } - - depl.Spec.Coordinators.Annotations = nil - - addAnnotation(t, kubecli, c, depl, nil) - }) -} diff --git a/tests/auth_test.go b/tests/auth_test.go deleted file mode 100644 index 7bc410ef2..000000000 --- a/tests/auth_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020-2021 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// Author Tomasz Mielech -// - -package tests - -import ( - "context" - "strings" - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/dchest/uniuri" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/arangodb/kube-arangodb/pkg/util/arangod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" -) - -// TestAuthenticationSingleDefaultSecret creating a single server -// with default authentication (on) using a generated JWT secret. 
-func TestAuthenticationSingleDefaultSecret(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-auth-sng-def-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl.Spec.SetDefaults(depl.GetName()) - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Secret must now exist - if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil { - t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err) - } - - // Create a database client - ctx := arangod.WithRequireAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) - - // Secret must no longer exist - if err := waitUntilSecretNotFound(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Minute); err != nil { - t.Fatalf("JWT secret '%s' still found: %v", depl.Spec.Authentication.GetJWTSecretName(), err) - } -} - -// TestAuthenticationSingleCustomSecret creating a single server -// with default authentication (on) using a user created JWT secret. 
-func TestAuthenticationSingleCustomSecret(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - secrets := kubecli.CoreV1().Secrets(ns) - - // Prepare deployment config - depl := newDeployment("test-auth-sng-cst-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl.Spec.Authentication.JWTSecretName = util.NewString(strings.ToLower(uniuri.New())) - depl.Spec.SetDefaults(depl.GetName()) - - // Create secret - if err := k8sutil.CreateTokenSecret(context.Background(), secrets, depl.Spec.Authentication.GetJWTSecretName(), "foo", nil); err != nil { - t.Fatalf("Create JWT secret failed: %v", err) - } - defer removeSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns) - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := arangod.WithRequireAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) - - // Secret must still exist - if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil { - t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err) - } -} - -// TestAuthenticationNoneSingle creating a single server -// with authentication set to `None`. -func TestAuthenticationNoneSingle(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-auth-none-sng-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl.Spec.Authentication.JWTSecretName = util.NewString(api.JWTSecretNameDisabled) - depl.Spec.SetDefaults(depl.GetName()) - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := arangod.WithSkipAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} - -// TestAuthenticationClusterDefaultSecret creating a cluster -// with default authentication (on) using a generated JWT secret. 
-func TestAuthenticationClusterDefaultSecret(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-auth-cls-def-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.SetDefaults(depl.GetName()) - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Secret must now exist - if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil { - t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err) - } - - // Create a database client - ctx := arangod.WithRequireAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) - - // Secret must no longer exist - if err := waitUntilSecretNotFound(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Minute); err != nil { - t.Fatalf("JWT secret '%s' still found: %v", depl.Spec.Authentication.GetJWTSecretName(), err) - } -} - -// TestAuthenticationClusterCustomSecret creating a cluster -// with default authentication (on) using a user created JWT secret. 
-func TestAuthenticationClusterCustomSecret(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - secrets := kubecli.CoreV1().Secrets(ns) - - // Prepare deployment config - depl := newDeployment("test-auth-cls-cst-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Authentication.JWTSecretName = util.NewString(strings.ToLower(uniuri.New())) - depl.Spec.SetDefaults(depl.GetName()) - - // Create secret - if err := k8sutil.CreateTokenSecret(context.Background(), secrets, depl.Spec.Authentication.GetJWTSecretName(), "foo", nil); err != nil { - t.Fatalf("Create JWT secret failed: %v", err) - } - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := arangod.WithRequireAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) - - // Secret must still exist - if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil { - t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err) - } - - // Cleanup secret - removeSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns) -} - -// TestAuthenticationNoneCluster creating a cluster -// with authentication set to `None`. 
-func TestAuthenticationNoneCluster(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-auth-none-cls-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Authentication.JWTSecretName = util.NewString(api.JWTSecretNameDisabled) - depl.Spec.SetDefaults(depl.GetName()) - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := arangod.WithSkipAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} diff --git a/tests/backup_test.go b/tests/backup_test.go deleted file mode 100644 index a6ab235d8..000000000 --- a/tests/backup_test.go +++ /dev/null @@ -1,1252 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Lars Maier -// - -package tests - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/arangodb/kube-arangodb/pkg/backup/utils" - - backupClient "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned/typed/backup/v1" - - "github.com/rs/zerolog/log" - "k8s.io/apimachinery/pkg/util/uuid" - - "github.com/stretchr/testify/require" - - "github.com/arangodb/go-driver" - backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" - kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/arangodb/kube-arangodb/pkg/util/retry" - "github.com/dchest/uniuri" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func waitUntilBackup(ci versioned.Interface, name, ns string, predicate func(*backupApi.ArangoBackup, error) error, timeout ...time.Duration) (*backupApi.ArangoBackup, error) { - var result *backupApi.ArangoBackup - op := func() error { - obj, err := ci.BackupV1().ArangoBackups(ns).Get(context.Background(), name, metav1.GetOptions{}) - result = obj - if predicate != nil { - if err := predicate(obj, err); err != nil { - return maskAny(err) - } - } - return nil - } - actualTimeout := deploymentReadyTimeout - if len(timeout) > 0 { - actualTimeout = timeout[0] - } - if err := retry.Retry(op, actualTimeout); err != nil { - return nil, maskAny(err) - } - return result, nil -} - -func backupIsReady(backup *backupApi.ArangoBackup, err error) error { - if err != nil { - return err - } - - if backup.Status.State == backupApi.ArangoBackupStateReady { - return nil - } - - return fmt.Errorf("Backup not ready - status: %s", backup.Status.State) -} - -func backupIsUploaded(backup *backupApi.ArangoBackup, err error) error { - if err != nil { - return err - } - - if backup.Status.Backup.Uploaded != nil && *backup.Status.Backup.Uploaded { - return nil - } - - return fmt.Errorf("Backup not ready - status: %s", backup.Status.State) -} - -func backupIsNotUploaded(backup *backupApi.ArangoBackup, err error) error { - if err != nil { - return err - } - - if backup.Status.Backup.Uploaded == nil || !*backup.Status.Backup.Uploaded { - return nil - } - - return fmt.Errorf("Backup not ready - status: %s", backup.Status.State) -} - -func backupIsAvailable(backup *backupApi.ArangoBackup, err error) error { - if err != nil { - return err - } - - if backup.Status.Available { - return nil - } - - return fmt.Errorf("Backup not available - status: %s", backup.Status.State) -} - -func backupIsNotAvailable(backup *backupApi.ArangoBackup, err error) error { - if err != nil { - return err - } - - if !backup.Status.Available { - return nil - } - - return fmt.Errorf("Backup is still available - status: %s", backup.Status.State) -} - -func backupIsNotFound(backup *backupApi.ArangoBackup, err error) error { - if err != nil { - if k8sutil.IsNotFound(err) { - return nil - } - return err - } - - return fmt.Errorf("Backup resource still exists") -} - -type EnsureBackupOptions struct { - Options *backupApi.ArangoBackupSpecOptions - Download *backupApi.ArangoBackupSpecDownload - Upload *backupApi.ArangoBackupSpecOperation -} - -func newBackup(name, deployment string, options *EnsureBackupOptions) 
*backupApi.ArangoBackup { - backup := &backupApi.ArangoBackup{ - ObjectMeta: metav1.ObjectMeta{ - Name: strings.ToLower(name), - Finalizers: []string{ - backupApi.FinalizerArangoBackup, - }, - }, - Spec: backupApi.ArangoBackupSpec{ - Deployment: backupApi.ArangoBackupSpecDeployment{ - Name: deployment, - }, - }, - } - - if options != nil { - backup.Spec.Options = options.Options - backup.Spec.Upload = options.Upload - backup.Spec.Download = options.Download - } - - return backup -} - -func newBackupPolicy(name, schedule string, labels map[string]string, options *EnsureBackupOptions) *backupApi.ArangoBackupPolicy { - policy := &backupApi.ArangoBackupPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Name: strings.ToLower(name), - Labels: labels, - }, - Spec: backupApi.ArangoBackupPolicySpec{ - DeploymentSelector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - - Schedule: schedule, - }, - } - - if options != nil { - policy.Spec.BackupTemplate.Options = options.Options - policy.Spec.BackupTemplate.Upload = options.Upload - } - - return policy -} - -func skipIfBackupUnavailable(t *testing.T, client driver.Client) { - err := utils.Retry(10, time.Second, func() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := client.Backup().List(ctx, nil); err != nil { - t.Logf("Backup API not yet ready: %s", err.Error()) - return err - } - - return nil - }) - - if err != nil { - t.Skipf("Backup API not available: %s", err.Error()) - } -} - -func statBackupMeta(client driver.Client, backupID driver.BackupID) (bool, driver.BackupMeta, error) { - - list, err := client.Backup().List(context.Background(), &driver.BackupListOptions{ID: backupID}) - if err != nil { - if driver.IsNotFound(err) { - return false, driver.BackupMeta{}, nil - } - - return false, driver.BackupMeta{}, err - } - - if meta, ok := list[backupID]; ok { - return true, meta, nil - } - - return false, driver.BackupMeta{}, fmt.Errorf("List does not contain backup") -} - -func ensureBackup(t *testing.T, deployment, ns string, deploymentClient versioned.Interface, predicate func(*backupApi.ArangoBackup, error) error, options *EnsureBackupOptions) (*backupApi.ArangoBackup, string, driver.BackupID) { - backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), deployment, options) - _, err := deploymentClient.BackupV1().ArangoBackups(ns).Create(context.Background(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create backup: %s", err) - name := backup.GetName() - - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, predicate) - require.NoError(t, err, "backup did not become available: %s", err) - var backupID string - if backup.Status.Backup != nil { - backupID = backup.Status.Backup.ID - } - return backup, name, driver.BackupID(backupID) -} - -func skipOrRemotePath(t *testing.T) { - repoPath := os.Getenv("TEST_REMOTE_REPOSITORY") - if repoPath == "" { - t.Skip("TEST_REMOTE_REPOSITORY not set") - } -} - -func newOperation() *backupApi.ArangoBackupSpecOperation { - return &backupApi.ArangoBackupSpecOperation{ - RepositoryURL: os.Getenv("TEST_REMOTE_REPOSITORY"), - CredentialsSecretName: testBackupRemoteSecretName, - } -} - -func newDownload(ID string) *backupApi.ArangoBackupSpecDownload { - return &backupApi.ArangoBackupSpecDownload{ - ArangoBackupSpecOperation: backupApi.ArangoBackupSpecOperation{ - RepositoryURL: os.Getenv("TEST_REMOTE_REPOSITORY"), - CredentialsSecretName: testBackupRemoteSecretName, - }, - ID: ID, - } -} - -func 
timeoutWaitForBackups(t *testing.T, backupClient backupClient.ArangoBackupInterface, labels metav1.LabelSelector, size int) func() error { - return func() error { - backups, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&labels)}) - if err != nil { - return err - } - - require.Len(t, backups.Items, size) - - done := 0 - - for _, backup := range backups.Items { - switch backup.Status.State { - case backupApi.ArangoBackupStateFailed: - log.Error().Str("backup", backup.Name).Str("Message", backup.Status.Message).Msg("Failed") - require.Fail(t, "Backup object failed", backup.Status.Message) - case backupApi.ArangoBackupStateReady: - done++ - } - } - - log.Info().Int("expected", size).Int("done", done).Msg("Iteration") - - if done == size { - return interrupt{} - } - - return nil - } -} - -func compareBackup(t *testing.T, meta driver.BackupMeta, backup *backupApi.ArangoBackup) { - require.NotNil(t, backup.Status.Backup) - require.Equal(t, meta.Version, backup.Status.Backup.Version) - require.True(t, meta.SizeInBytes > 0) - require.True(t, meta.NumberOfDBServers == 2) - require.True(t, meta.SizeInBytes == backup.Status.Backup.SizeInBytes) - require.True(t, meta.NumberOfDBServers == backup.Status.Backup.NumberOfDBServers) -} - -func TestBackupCluster(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - deploymentClient := kubeArangoClient.MustNewClient() - ns := getNamespace(t) - - backupPolicyClient := deploymentClient.BackupV1().ArangoBackupPolicies(ns) - backupClient := deploymentClient.BackupV1().ArangoBackups(ns) - - cmd := []string{ - "--backup.api-enabled=jwt", - } - - // Prepare deployment config - deplLabels := map[string]string{ - "COMMON": "1", - "TEST": "1", - } - - depl := newDeployment("test-backup-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.DBServers.Count = util.NewInt(2) - depl.Spec.DBServers.Args = cmd - depl.Spec.Coordinators.Count = util.NewInt(2) - depl.Spec.Coordinators.Args = cmd - depl.Spec.Agents.Args = cmd - depl.Spec.SetDefaults(depl.GetName()) // this must be last - depl.Labels = deplLabels - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Prepare deployment config - depl2Labels := map[string]string{ - "COMMON": "1", - "TEST": "2", - } - - depl2 := newDeployment("test-backup-two-" + uniuri.NewLen(4)) - depl2.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl2.Spec.DBServers.Count = util.NewInt(2) - depl2.Spec.DBServers.Args = cmd - depl2.Spec.Coordinators.Count = util.NewInt(2) - depl2.Spec.Coordinators.Args = cmd - depl2.Spec.Agents.Args = cmd - depl2.Spec.SetDefaults(depl2.GetName()) // this must be last - depl2.Labels = depl2Labels - defer deferedCleanupDeployment(c, depl2.GetName(), ns) - - // Create deployment - apiObject, err := deploymentClient.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - defer removeDeployment(deploymentClient, depl.GetName(), ns) - require.NoError(t, err, "failed to create deployment: %s", err) - - api2Object, err := deploymentClient.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl2, metav1.CreateOptions{}) - defer removeDeployment(deploymentClient, depl2.GetName(), ns) - require.NoError(t, err, "failed to create deployment two: %s", err) - - _, err = waitUntilDeployment(deploymentClient, depl.GetName(), ns, deploymentIsReady()) - require.NoError(t, err, fmt.Sprintf("Deployment not running in time: 
%s", err)) - - _, err = waitUntilDeployment(deploymentClient, depl2.GetName(), ns, deploymentIsReady()) - require.NoError(t, err, fmt.Sprintf("Deployment two not running in time: %s", err)) - - ctx := context.Background() - databaseClient := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - database2Client := mustNewArangodDatabaseClient(ctx, kubecli, api2Object, t, nil) - - skipIfBackupUnavailable(t, databaseClient) - skipIfBackupUnavailable(t, database2Client) - - deployments := []*api.ArangoDeployment{depl, depl2} - databaseClients := map[*api.ArangoDeployment]driver.Client{ - depl: databaseClient, - depl2: database2Client, - } - - t.Run("create-backups-on-multiple-databases", func(t *testing.T) { - size := 8 - expected := size * len(deployments) - labels := metav1.LabelSelector{ - MatchLabels: map[string]string{ - "type": string(uuid.NewUUID()), - }, - } - - for id := 0; id < size; id++ { - for _, deployment := range deployments { - backup := newBackup(fmt.Sprintf("my-backup-%s-%s", deployment.GetName(), uniuri.NewLen(4)), deployment.GetName(), nil) - - backup.Labels = labels.MatchLabels - - _, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create backup: %s", err) - defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{}) - } - } - - err := timeout(time.Second, 30*time.Minute, timeoutWaitForBackups(t, backupClient, labels, expected)) - require.NoError(t, err) - }) - - t.Run("create backup", func(t *testing.T) { - backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), depl.GetName(), nil) - _, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create backup: %s", err) - defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{}) - - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsAvailable) - require.NoError(t, err, "backup did not become available: %s", err) - backupID := backup.Status.Backup.ID - - // check that the backup is actually available - found, meta, err := statBackupMeta(databaseClient, driver.BackupID(backupID)) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - compareBackup(t, meta, backup) - }) - - t.Run("create-upload backup", func(t *testing.T) { - skipOrRemotePath(t) - - backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), depl.GetName(), nil) - _, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create backup: %s", err) - defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{}) - - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsReady) - require.NoError(t, err, "backup did not become available: %s", err) - backupID := backup.Status.Backup.ID - - // check that the backup is actually available - found, meta, err := statBackupMeta(databaseClient, driver.BackupID(backupID)) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - compareBackup(t, meta, backup) - require.Nil(t, backup.Status.Backup.Uploaded) - require.Nil(t, backup.Status.Backup.Downloaded) - - t.Logf("Add upload") - // add upload part - currentBackup, err := backupClient.Get(context.Background(), backup.Name, metav1.GetOptions{}) - require.NoError(t, err) - - currentBackup.Spec.Upload = newOperation() - - _, err = 
backupClient.Update(context.Background(), currentBackup, metav1.UpdateOptions{}) - require.NoError(t, err) - - // After the backup goes through the uploading phase, wait for it to finish - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - found, meta, err = statBackupMeta(databaseClient, driver.BackupID(backupID)) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - compareBackup(t, meta, backup) - require.NotNil(t, backup.Status.Backup.Uploaded, "Upload flag is nil") - require.Nil(t, backup.Status.Backup.Downloaded) - }) - - t.Run("create backup and delete", func(t *testing.T) { - backup, name, id := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, nil) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // check that the backup is actually available - found, meta, err := statBackupMeta(databaseClient, id) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - compareBackup(t, meta, backup) - - // now remove the backup - backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotFound) - require.NoError(t, err, "Backup test failed: %s", err) - - // check that the actual backup has been deleted - found, _, _ = statBackupMeta(databaseClient, id) - require.False(t, found) - }) - - t.Run("remove backup locally", func(t *testing.T) { - backup, name, id := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, nil) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // now remove the backup locally - err := databaseClient.Backup().Delete(context.Background(), id) - require.NoError(t, err, "Failed to delete backup: %s", err) - - // wait for the backup to become unavailable - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotAvailable, 30*time.Second) - require.NoError(t, err, "Backup test failed: %s", err) - require.Equal(t, backupApi.ArangoBackupStateDeleted, backup.Status.State) - }) - - t.Run("handle existing backups", func(t *testing.T) { - // create a local backup manually - id, _, err := databaseClient.Backup().Create(context.Background(), nil) - require.NoError(t, err, "Creating backup failed: %s", err) - found, meta, err := statBackupMeta(databaseClient, id) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - - // create a backup resource manually with that id - var backup *backupApi.ArangoBackup - err = timeout(3*time.Second, 2*time.Minute, func() error { - backups, err := backupClient.List(context.Background(), metav1.ListOptions{}) - if err != nil { - return err - } - - if len(backups.Items) == 0 { - return nil - } - - if len(backups.Items) > 1 { - return fmt.Errorf("Too many backups") - } - - backup = &backups.Items[0] - - return interrupt{} - }) - require.NoError(t, err, "failed to create backup: %s", err) - defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{}) - - // wait until the backup becomes available - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsAvailable) - require.NoError(t, err, "backup did not become available: %s", err) - require.Equal(t, backupApi.ArangoBackupStateReady, backup.Status.State) - compareBackup(t, meta, backup) - require.NotNil(t, backup.Status.Backup.Imported) - require.True(t, 
*backup.Status.Backup.Imported) - }) - - t.Run("create-multiple-restore-cycle", func(t *testing.T) { - type Book struct { - Title string - Author string - } - - ctx := context.Background() - // first add collections, insert data into the cluster - dbname := "backup-test-db-two" - db, err := databaseClient.CreateDatabase(ctx, dbname, nil) - require.NoError(t, err, "failed to create database: %s", err) - - colname := "backup-test-col" - col, err := db.CreateCollection(ctx, colname, nil) - require.NoError(t, err, "failed to create collection: %s", err) - - meta1, err := col.CreateDocument(ctx, &Book{Title: "My first Go-Program", Author: "Adam"}) - require.NoError(t, err, "failed to create document: %s", err) - - // Now create a whole batch of backups - size := 8 - labels := metav1.LabelSelector{ - MatchLabels: map[string]string{ - "type": string(uuid.NewUUID()), - }, - } - - for id := 0; id < size; id++ { - backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), depl.GetName(), nil) - - backup.Labels = labels.MatchLabels - - _, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create backup: %s", err) - defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{}) - } - - err = timeout(time.Second, 5*time.Minute, timeoutWaitForBackups(t, backupClient, labels, size)) - - require.NoError(t, err) - - // List the backups created so far - backups, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&labels)}) - require.NoError(t, err) - require.Len(t, backups.Items, size) - - // Create the backup from which we will restore - backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), depl.GetName(), nil) - - backup.Labels = labels.MatchLabels - - _, err = backupClient.Create(context.Background(), backup, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create backup: %s", err) - defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{}) - - name := backup.Name - - _ = timeout(time.Second, 5*time.Minute, timeoutWaitForBackups(t, backupClient, labels, size+1)) - - // insert yet another document - meta2, err := col.CreateDocument(ctx, &Book{Title: "Bad book title", Author: "Lars"}) - require.NoError(t, err, "failed to create document: %s", err) - - // now restore the backup - _, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = util.NewString(name) - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - _, err = waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore != nil { - result := status.Restore - - if result.RequestedFrom != name { - return fmt.Errorf("Wrong backup in RequestedFrom: %s, expected %s", result.RequestedFrom, name) - } - - if result.State == api.DeploymentRestoreStateRestoreFailed { - t.Fatalf("Failed to restore backup: %s", result.Message) - } - - if result.State == api.DeploymentRestoreStateRestored { - return nil - } - - return fmt.Errorf("Not yet restored - state %s", result.State) - } - - return fmt.Errorf("Restore is not set on deployment") - }) - require.NoError(t, err, "Deployment did not restore in time: %s", err) - - // restore was completed, check if documents are there - found, err := col.DocumentExists(ctx, meta1.Key) - require.NoError(t, err, "Failed to check if document exists: %s", err) - 
require.True(t, found) - - // second document should not exist - found, err = col.DocumentExists(ctx, meta2.Key) - require.NoError(t, err, "Failed to check if document exists: %s", err) - require.False(t, found) - - // delete the RestoreFrom entry - _, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = nil - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - // wait for it to be deleted in the status - waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore == nil { - return nil - } - - return fmt.Errorf("Restore is not set to nil") - }) - - // Assert that all of the backups are in valid state - backups, err = backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&labels)}) - require.NoError(t, err) - require.Len(t, backups.Items, size+1) - - for _, b := range backups.Items { - require.Equal(t, backupApi.ArangoBackupStateReady, b.Status.State, b.Status.Message) - } - }) - - t.Run("create-restore-cycle", func(t *testing.T) { - type Book struct { - Title string - Author string - } - - ctx := context.Background() - // first add collections, insert data into the cluster - dbname := "backup-test-db" - db, err := databaseClient.CreateDatabase(ctx, dbname, nil) - require.NoError(t, err, "failed to create database: %s", err) - - colname := "backup-test-col" - col, err := db.CreateCollection(ctx, colname, nil) - require.NoError(t, err, "failed to create collection: %s", err) - - meta1, err := col.CreateDocument(ctx, &Book{Title: "My first Go-Program", Author: "Adam"}) - require.NoError(t, err, "failed to create document: %s", err) - - // Now create a backup - _, name, _ := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, nil) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // insert yet another document - meta2, err := col.CreateDocument(ctx, &Book{Title: "Bad book title", Author: "Lars"}) - require.NoError(t, err, "failed to create document: %s", err) - - // now restore the backup - _, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = util.NewString(name) - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - _, err = waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore != nil { - result := status.Restore - - if result.RequestedFrom != name { - return fmt.Errorf("Wrong backup in RequestedFrom: %s, expected %s", result.RequestedFrom, name) - } - - if result.State == api.DeploymentRestoreStateRestoreFailed { - t.Fatalf("Failed to restore backup: %s", result.Message) - } - - if result.State == api.DeploymentRestoreStateRestored { - return nil - } - - return fmt.Errorf("Not yet restored - state %s", result.State) - } - - return fmt.Errorf("Restore is not set on deployment") - }) - require.NoError(t, err, "Deployment did not restore in time: %s", err) - - // restore was completed, check if documents are there - found, err := col.DocumentExists(ctx, meta1.Key) - require.NoError(t, err, "Failed to check if document exists: %s", err) - require.True(t, found) - - // second document should not exist - found, err = col.DocumentExists(ctx, meta2.Key) - require.NoError(t, err, "Failed to check if document exists: %s", err) - require.False(t, found) - - // delete 
the RestoreFrom entry - _, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = nil - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - // wait for it to be deleted in the status - waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore == nil { - return nil - } - - return fmt.Errorf("Restore is not set to nil") - }) - - }) - - t.Run("restore-nonexistent", func(t *testing.T) { - // try to restore a backup that does not exist - name := "does-not-exist" - - _, err := updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = util.NewString(name) - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - depl, err := waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore != nil { - result := status.Restore - - if result.RequestedFrom != name { - return fmt.Errorf("Wrong backup in RequestedFrom: %s, expected %s", result.RequestedFrom, name) - } - - if result.State == api.DeploymentRestoreStateRestored { - t.Fatalf("Restore unexpectedly succeeded: %s", result.Message) - } - - if result.State == api.DeploymentRestoreStateRestoreFailed { - return nil - } - - return fmt.Errorf("Not yet restored - state %s", result.State) - } - - return fmt.Errorf("Restore is not set on deployment") - }) - require.NoError(t, err, "Deployment did not restore in time: %s", err) - require.NotNil(t, depl.Status.Restore) - require.Equal(t, api.DeploymentRestoreStateRestoreFailed, depl.Status.Restore.State) - }) - - t.Run("upload", func(t *testing.T) { - skipOrRemotePath(t) - - // create backup with upload operation - backup, name, _ := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Upload: newOperation()}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup has been uploaded - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - require.NotNil(t, backup.Status.Backup) - require.NotNil(t, backup.Status.Backup.Uploaded) - require.Nil(t, backup.Status.Backup.Downloaded) - - require.True(t, *backup.Status.Backup.Uploaded) - }) - - t.Run("re-upload", func(t *testing.T) { - skipOrRemotePath(t) - - // create backup with upload operation - backup, name, _ := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Upload: newOperation()}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup has been uploaded - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - require.NotNil(t, backup.Status.Backup) - require.NotNil(t, backup.Status.Backup.Uploaded) - require.Nil(t, backup.Status.Backup.Downloaded) - - require.True(t, *backup.Status.Backup.Uploaded) - - // Remove upload option - currentBackup, err := backupClient.Get(context.Background(), backup.Name, metav1.GetOptions{}) - require.NoError(t, err) - - currentBackup.Spec.Upload = nil - - _, err = backupClient.Update(context.Background(), currentBackup, metav1.UpdateOptions{}) - require.NoError(t, err) - - // Wait for uploaded flag to disappear - backup, err = 
waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - // Add the upload flag again - currentBackup, err = backupClient.Get(context.Background(), backup.Name, metav1.GetOptions{}) - require.NoError(t, err) - - currentBackup.Spec.Upload = newOperation() - - _, err = backupClient.Update(context.Background(), currentBackup, metav1.UpdateOptions{}) - require.NoError(t, err) - - // Wait for uploaded flag to appear - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - }) - - t.Run("upload-download-cycle", func(t *testing.T) { - skipOrRemotePath(t) - - // create backup with upload operation - backup, name, id := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Upload: newOperation()}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup has been uploaded - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - // check that the backup is actually available - found, meta, err := statBackupMeta(databaseClient, id) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - require.Equal(t, meta.Version, backup.Status.Backup.Version) - - require.NotNil(t, backup.Status.Backup) - require.NotNil(t, backup.Status.Backup.Uploaded) - require.Nil(t, backup.Status.Backup.Downloaded) - - require.True(t, *backup.Status.Backup.Uploaded) - - // Afterwards, remove the backup - backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotFound) - require.NoError(t, err, "Backup test failed: %s", err) - - // check that the actual backup has been deleted - found, _, _ = statBackupMeta(databaseClient, id) - require.False(t, found) - - // create backup with download operation - backup, name, _ = ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Download: newDownload(string(id))}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup becomes ready - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsReady) - require.NoError(t, err, "backup did not become ready: %s", err) - - // check that the backup is actually available - found, meta, err = statBackupMeta(databaseClient, id) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - require.Equal(t, meta.Version, backup.Status.Backup.Version) - - require.NotNil(t, backup.Status.Backup) - require.Nil(t, backup.Status.Backup.Uploaded) - require.NotNil(t, backup.Status.Backup.Downloaded) - - require.True(t, *backup.Status.Backup.Downloaded) - }) - - t.Run("upload-download-upload-cycle", func(t *testing.T) { - skipOrRemotePath(t) - - // create backup with upload operation - backup, name, id := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Upload: newOperation()}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup has been uploaded - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - // check that the backup is actually 
available - found, meta, err := statBackupMeta(databaseClient, id) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - require.Equal(t, meta.Version, backup.Status.Backup.Version) - - require.NotNil(t, backup.Status.Backup) - require.NotNil(t, backup.Status.Backup.Uploaded) - require.Nil(t, backup.Status.Backup.Downloaded) - - require.True(t, *backup.Status.Backup.Uploaded) - - // Afterwards, remove the backup - backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotFound) - require.NoError(t, err, "Backup test failed: %s", err) - - // check that the actual backup has been deleted - found, _, _ = statBackupMeta(databaseClient, id) - require.False(t, found) - - // create backup with download operation - backup, name, _ = ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Download: newDownload(string(id))}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup becomes ready - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsReady) - require.NoError(t, err, "backup did not become ready: %s", err) - - // check that the backup is actually available - found, meta, err = statBackupMeta(databaseClient, id) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - require.Equal(t, meta.Version, backup.Status.Backup.Version) - - require.NotNil(t, backup.Status.Backup) - require.Nil(t, backup.Status.Backup.Uploaded) - require.NotNil(t, backup.Status.Backup.Downloaded) - - require.True(t, *backup.Status.Backup.Downloaded) - - // Add the upload flag again - currentBackup, err := backupClient.Get(context.Background(), backup.Name, metav1.GetOptions{}) - require.NoError(t, err) - - currentBackup.Spec.Upload = newOperation() - - _, err = backupClient.Update(context.Background(), currentBackup, metav1.UpdateOptions{}) - require.NoError(t, err) - - // Wait for uploaded flag to appear - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - }) - - t.Run("create-upload-download-restore-cycle", func(t *testing.T) { - skipOrRemotePath(t) - - type Book struct { - Title string - Author string - } - - ctx := context.Background() - // first add collections, insert data into the cluster - dbname := "backup-test-db-up-down" - db, err := databaseClient.CreateDatabase(ctx, dbname, nil) - require.NoError(t, err, "failed to create database: %s", err) - - colname := "backup-test-col" - col, err := db.CreateCollection(ctx, colname, nil) - require.NoError(t, err, "failed to create collection: %s", err) - - meta1, err := col.CreateDocument(ctx, &Book{Title: "My first Go-Program", Author: "Adam"}) - require.NoError(t, err, "failed to create document: %s", err) - - // Now create a backup - backup, name, id := ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Upload: newOperation()}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup has been uploaded - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded) - require.NoError(t, err, "backup did not become ready: %s", err) - - // insert yet another document - meta2, err := col.CreateDocument(ctx, &Book{Title: "Bad book title", Author: "Lars"}) - require.NoError(t, err, "failed to create 
document: %s", err) - - // now remove the backup locally - err = databaseClient.Backup().Delete(context.Background(), id) - require.NoError(t, err, "Failed to delete backup: %s", err) - - // wait for the backup to become unavailable - backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotAvailable, 30*time.Second) - require.NoError(t, err, "Backup test failed: %s", err) - require.Equal(t, backupApi.ArangoBackupStateDeleted, backup.Status.State) - - // now remove the backup - backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsNotFound) - require.NoError(t, err, "Backup test failed: %s", err) - - // create backup with download operation - backup, name, _ = ensureBackup(t, depl.GetName(), ns, deploymentClient, backupIsAvailable, &EnsureBackupOptions{Download: newDownload(string(id))}) - defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{}) - - // wait until the backup becomes ready - _, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsReady) - require.NoError(t, err, "backup did not become ready: %s", err) - - // now restore the backup - _, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = util.NewString(name) - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - _, err = waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore != nil { - result := status.Restore - - if result.RequestedFrom != name { - return fmt.Errorf("Wrong backup in RequestedFrom: %s, expected %s", result.RequestedFrom, name) - } - - if result.State == api.DeploymentRestoreStateRestoreFailed { - t.Fatalf("Failed to restore backup: %s", result.Message) - } - - if result.State == api.DeploymentRestoreStateRestored { - return nil - } - - return fmt.Errorf("Not yet restored - state %s", result.State) - } - - return fmt.Errorf("Restore is not set on deployment") - }) - require.NoError(t, err, "Deployment did not restore in time: %s", err) - - // restore was completed, check if documents are there - found, err := col.DocumentExists(ctx, meta1.Key) - require.NoError(t, err, "Failed to check if document exists: %s", err) - require.True(t, found) - - // second document should not exist - found, err = col.DocumentExists(ctx, meta2.Key) - require.NoError(t, err, "Failed to check if document exists: %s", err) - require.False(t, found) - - // delete the RestoreFrom entry - _, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.RestoreFrom = nil - }) - require.NoError(t, err, "Failed to update deployment: %s", err) - - // wait for it to be deleted in the status - waitUntilDeployment(deploymentClient, depl.GetName(), ns, func(depl *api.ArangoDeployment) error { - status := depl.Status - if status.Restore == nil { - return nil - } - - return fmt.Errorf("Restore is not set to nil") - }) - }) - - t.Run("create-backup-policy", func(t *testing.T) { - skipOrRemotePath(t) - - selector := metav1.FormatLabelSelector(&metav1.LabelSelector{ - MatchLabels: deplLabels, - }) - - policy := newBackupPolicy(depl.GetName(), "*/1 * * * *", deplLabels, nil) - list, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: selector}) - require.NoError(t, err) - require.Len(t, list.Items, 0, "unexpected matching ArangoBackup objects") - - _, err = 
backupPolicyClient.Create(context.Background(), policy, metav1.CreateOptions{}) - require.NoError(t, err) - defer backupPolicyClient.Delete(context.Background(), policy.Name, metav1.DeleteOptions{}) - - // Wait until 2 backups are created - err = timeout(5*time.Second, 5*time.Minute, func() error { - list, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: selector}) - - if err != nil { - return err - } - - t.Logf("Received %d ArangoBackups from label selector %s", len(list.Items), selector) - - if len(list.Items) < 2 { - return nil - } - - return interrupt{} - }) - require.NoError(t, err) - - // Cleanup scheduler - backupPolicyClient.Delete(context.Background(), policy.Name, metav1.DeleteOptions{}) - - backups, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{ - MatchLabels: deplLabels, - })}) - require.NoError(t, err) - - for _, backup := range backups.Items { - t.Run(fmt.Sprintf("deleting - %s", backup.Name), func(t *testing.T) { - defer backupClient.Delete(context.Background(), backup.Name, metav1.DeleteOptions{}) - - currentBackup, err := waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsAvailable) - require.NoError(t, err, "backup did not become available: %s", err) - backupID := currentBackup.Status.Backup.ID - - // check that the backup is actually available - found, meta, err := statBackupMeta(databaseClient, driver.BackupID(backupID)) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - require.Equal(t, meta.Version, currentBackup.Status.Backup.Version) - require.Equal(t, depl.GetName(), currentBackup.Spec.Deployment.Name) - }) - } - - // Cleanup - err = timeout(time.Second, 2*time.Minute, func() error { - list, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: selector}) - if err != nil { - return err - } - - if len(list.Items) != 0 { - return nil - } - - return interrupt{} - }) - require.NoError(t, err) - }) - - t.Run("create-backup-policy-multiple", func(t *testing.T) { - skipOrRemotePath(t) - - labels := map[string]string{ - "COMMON": "1", - } - selector := metav1.FormatLabelSelector(&metav1.LabelSelector{ - MatchLabels: labels, - }) - - policy := newBackupPolicy(depl.GetName(), "*/1 * * * *", labels, nil) - list, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: selector}) - require.NoError(t, err) - require.Len(t, list.Items, 0, "unexpected matching ArangoBackup objects") - - _, err = backupPolicyClient.Create(context.Background(), policy, metav1.CreateOptions{}) - require.NoError(t, err) - defer backupPolicyClient.Delete(context.Background(), policy.Name, metav1.DeleteOptions{}) - - // Wait until 2 backups are created for each deployment - err = timeout(5*time.Second, 5*time.Minute, func() error { - list, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: selector}) - - if err != nil { - return err - } - - t.Logf("Received %d ArangoBackups from label selector %s", len(list.Items), selector) - - if len(list.Items) < 4 { - return nil - } - - return interrupt{} - }) - require.NoError(t, err) - - // Cleanup scheduler - backupPolicyClient.Delete(context.Background(), policy.Name, metav1.DeleteOptions{}) - - for _, deployment := range deployments { - t.Run(fmt.Sprintf("deployment %s", deployment.Name), func(t *testing.T) { - backups, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: 
metav1.FormatLabelSelector(&metav1.LabelSelector{ - MatchLabels: deployment.Labels, - })}) - require.NoError(t, err) - - require.Len(t, backups.Items, 2) - - for _, backup := range backups.Items { - t.Run(fmt.Sprintf("deleting - %s", backup.Name), func(t *testing.T) { - defer backupClient.Delete(context.Background(), backup.Name, metav1.DeleteOptions{}) - - currentBackup, err := waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsAvailable) - require.NoError(t, err, "backup did not become available: %s", err) - backupID := currentBackup.Status.Backup.ID - - // check that the backup is actually available - found, meta, err := statBackupMeta(databaseClients[deployment], driver.BackupID(backupID)) - require.NoError(t, err, "Backup test failed: %s", err) - require.True(t, found) - require.Equal(t, meta.Version, currentBackup.Status.Backup.Version) - require.Equal(t, deployment.GetName(), currentBackup.Spec.Deployment.Name) - }) - } - }) - } - - // Cleanup - err = timeout(time.Second, 2*time.Minute, func() error { - list, err := backupClient.List(context.Background(), metav1.ListOptions{LabelSelector: selector}) - if err != nil { - return err - } - - if len(list.Items) != 0 { - return nil - } - - return interrupt{} - }) - require.NoError(t, err) - }) -} diff --git a/tests/change_args_test.go b/tests/change_args_test.go deleted file mode 100644 index 28d256f90..000000000 --- a/tests/change_args_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package tests - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/dchest/uniuri" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - driver "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" -) - -// TestChangeArgsAgents tests the creation of an active failover deployment -// with default settings and, once ready, changes the arguments of the agents. 
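-// The wait loop in this test is expected to observe the operator rolling the agent pods
-// until every agent container carries the new argument on its command line.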
-func TestChangeArgsAgents(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-chga-rs-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeActiveFailover) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("ActiveFailover servers did not return a version in time: %v", err) - } - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingleActive)) - - // Now change agent arguments - if _, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.Agents.Args = []string{"--log.level=DEBUG"} - }); err != nil { - t.Fatalf("Failed to update deployment: %v", err) - } - - // Wait until all agents have the right arguments - if _, err := waitUntilDeployment(c, depl.GetName(), ns, func(d *api.ArangoDeployment) error { - members := d.Status.Members - if len(members.Agents) != 3 { - return fmt.Errorf("Expected 3 agents, got %d", len(members.Agents)) - } - pods := kubecli.CoreV1().Pods(ns) - for _, m := range members.Agents { - pod, err := pods.Get(context.Background(), m.PodName, metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - found := false - for _, c := range pod.Spec.Containers { - if c.Name != k8sutil.ServerContainerName { - continue - } - // Check command - for _, a := range append(c.Args, c.Command...) { - if a == "--log.level=DEBUG" { - found = true - } - } - } - if !found { - return fmt.Errorf("Did not find new argument") - } - } - return nil - }, time.Minute*10); err != nil { - t.Fatalf("Deployment not updated in time: %v", err) - } -} - -// TestChangeArgsDBServer tests the creation of a cluster deployment -// with default settings and, once ready, changes the arguments of the dbservers. 
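-// The flow mirrors TestChangeArgsAgents above, but it targets spec.DBServers.Args and
-// therefore expects all three DBServer pods to be rolled with the new argument.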
-func TestChangeArgsDBServer(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-chga-db-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster servers did not return a version in time: %v", err) - } - - // Now change dbserver arguments - if _, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.DBServers.Args = []string{"--log.level=DEBUG"} - }); err != nil { - t.Fatalf("Failed to update deployment: %v", err) - } - - // Wait until all dbservers have the right arguments - if _, err := waitUntilDeployment(c, depl.GetName(), ns, func(d *api.ArangoDeployment) error { - members := d.Status.Members - if len(members.DBServers) != 3 { - return fmt.Errorf("Expected 3 dbservers, got %d", len(members.DBServers)) - } - pods := kubecli.CoreV1().Pods(ns) - for _, m := range members.DBServers { - pod, err := pods.Get(context.Background(), m.PodName, metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - found := false - for _, c := range pod.Spec.Containers { - if c.Name != k8sutil.ServerContainerName { - continue - } - // Check command - for _, a := range append(c.Args, c.Command...) { - if a == "--log.level=DEBUG" { - found = true - } - } - } - if !found { - return fmt.Errorf("Did not find new argument") - } - } - return nil - }, time.Minute*10); err != nil { - t.Fatalf("Deployment not updated in time: %v", err) - } -} diff --git a/tests/cursor_test.go b/tests/cursor_test.go deleted file mode 100644 index 3533ddab4..000000000 --- a/tests/cursor_test.go +++ /dev/null @@ -1,376 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package tests - -import ( - "context" - "reflect" - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/dchest/uniuri" - "github.com/stretchr/testify/require" - - driver "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" -) - -// TestCursorSingle tests the creation of a single server deployment -// with default settings and runs some cursor requests on it. -func TestCursorSingle(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-cur-sng-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server did not return a version in time: %v", err) - } - - // Check server role - require.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingle)) - - // Run cursor tests - runCursorTests(t, client) - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} - -// TestCursorActiveFailover tests the creation of an ActiveFailover server deployment -// with default settings. -func TestCursorActiveFailover(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-cur-rs-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeActiveFailover) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("ActiveFailover servers did not return a version in time: %v", err) - } - - // Check server role - require.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingleActive)) - - // Run cursor tests - runCursorTests(t, client) - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} - -// TestCursorCluster tests the creation of a cluster deployment -// with default settings. 
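-// In cluster mode the client talks to a coordinator (see the server-role check below),
-// so runCursorTests exercises the cursor API through a coordinator.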
-func TestCursorCluster(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-cur-cls-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster did not return a version in time: %v", err) - } - - // Check server role - require.NoError(t, testServerRole(ctx, client, driver.ServerRoleCoordinator)) - - // Run cursor tests - runCursorTests(t, client) - - // cleanup - removeDeployment(c, depl.GetName(), ns) -} - -type Book struct { - Title string -} - -type UserDoc struct { - Name string `json:"name"` - Age int `json:"age"` -} - -type queryTest struct { - Query string - BindVars map[string]interface{} - ExpectSuccess bool - ExpectedDocuments []interface{} - DocumentType reflect.Type -} - -type queryTestContext struct { - Context context.Context - ExpectCount bool -} - -func runCursorTests(t *testing.T, client driver.Client) { - // Create data set - collectionData := map[string][]interface{}{ - "books": []interface{}{ - Book{Title: "Book 01"}, - Book{Title: "Book 02"}, - Book{Title: "Book 03"}, - Book{Title: "Book 04"}, - Book{Title: "Book 05"}, - Book{Title: "Book 06"}, - Book{Title: "Book 07"}, - Book{Title: "Book 08"}, - Book{Title: "Book 09"}, - Book{Title: "Book 10"}, - Book{Title: "Book 11"}, - Book{Title: "Book 12"}, - Book{Title: "Book 13"}, - Book{Title: "Book 14"}, - Book{Title: "Book 15"}, - Book{Title: "Book 16"}, - Book{Title: "Book 17"}, - Book{Title: "Book 18"}, - Book{Title: "Book 19"}, - Book{Title: "Book 20"}, - }, - "users": []interface{}{ - UserDoc{Name: "John", Age: 13}, - UserDoc{Name: "Jake", Age: 25}, - UserDoc{Name: "Clair", Age: 12}, - UserDoc{Name: "Johnny", Age: 42}, - UserDoc{Name: "Blair", Age: 67}, - UserDoc{Name: "Zz", Age: 12}, - }, - } - ctx := context.Background() - db := ensureDatabase(ctx, client, "cursur_test", nil, t) - for colName, colDocs := range collectionData { - col := ensureCollection(ctx, db, colName, nil, t) - if _, _, err := col.CreateDocuments(ctx, colDocs); err != nil { - t.Fatalf("Expected success, got %s", err) - } - } - - // Setup tests - tests := []queryTest{ - queryTest{ - Query: "FOR d IN books SORT d.Title RETURN d", - ExpectSuccess: true, - ExpectedDocuments: collectionData["books"], - DocumentType: reflect.TypeOf(Book{}), - }, - queryTest{ - Query: "FOR d IN books FILTER d.Title==@title SORT d.Title RETURN d", - BindVars: map[string]interface{}{"title": "Book 02"}, - ExpectSuccess: true, - ExpectedDocuments: []interface{}{collectionData["books"][1]}, - DocumentType: reflect.TypeOf(Book{}), - }, - queryTest{ - Query: "FOR d IN books FILTER d.Title==@title SORT d.Title RETURN d", - BindVars: map[string]interface{}{"somethingelse": "Book 02"}, - ExpectSuccess: false, // Unknown `@title` - 
}, - queryTest{ - Query: "FOR u IN users FILTER u.age>100 SORT u.name RETURN u", - ExpectSuccess: true, - ExpectedDocuments: []interface{}{}, - DocumentType: reflect.TypeOf(UserDoc{}), - }, - queryTest{ - Query: "FOR u IN users FILTER u.age<@maxAge SORT u.name RETURN u", - BindVars: map[string]interface{}{"maxAge": 20}, - ExpectSuccess: true, - ExpectedDocuments: []interface{}{collectionData["users"][2], collectionData["users"][0], collectionData["users"][5]}, - DocumentType: reflect.TypeOf(UserDoc{}), - }, - queryTest{ - Query: "FOR u IN users FILTER u.age<@maxAge SORT u.name RETURN u", - BindVars: map[string]interface{}{"maxage": 20}, - ExpectSuccess: false, // `@maxage` versus `@maxAge` - }, - queryTest{ - Query: "FOR u IN users SORT u.age RETURN u.age", - ExpectedDocuments: []interface{}{12, 12, 13, 25, 42, 67}, - DocumentType: reflect.TypeOf(12), - ExpectSuccess: true, - }, - queryTest{ - Query: "FOR p IN users COLLECT a = p.age WITH COUNT INTO c SORT a RETURN [a, c]", - ExpectedDocuments: []interface{}{[]int{12, 2}, []int{13, 1}, []int{25, 1}, []int{42, 1}, []int{67, 1}}, - DocumentType: reflect.TypeOf([]int{}), - ExpectSuccess: true, - }, - queryTest{ - Query: "FOR u IN users SORT u.name RETURN u.name", - ExpectedDocuments: []interface{}{"Blair", "Clair", "Jake", "John", "Johnny", "Zz"}, - DocumentType: reflect.TypeOf("foo"), - ExpectSuccess: true, - }, - } - - // Setup context alternatives - contexts := []queryTestContext{ - queryTestContext{nil, false}, - queryTestContext{context.Background(), false}, - queryTestContext{driver.WithQueryCount(context.Background()), true}, - queryTestContext{driver.WithQueryCount(context.Background(), true), true}, - queryTestContext{driver.WithQueryCount(context.Background(), false), false}, - queryTestContext{driver.WithQueryBatchSize(context.Background(), 1), false}, - queryTestContext{driver.WithQueryCache(context.Background()), false}, - queryTestContext{driver.WithQueryCache(context.Background(), true), false}, - queryTestContext{driver.WithQueryCache(context.Background(), false), false}, - queryTestContext{driver.WithQueryMemoryLimit(context.Background(), 600000), false}, - queryTestContext{driver.WithQueryTTL(context.Background(), time.Minute), false}, - queryTestContext{driver.WithQueryBatchSize(driver.WithQueryCount(context.Background()), 1), true}, - queryTestContext{driver.WithQueryCache(driver.WithQueryCount(driver.WithQueryBatchSize(context.Background(), 2))), true}, - } - - // Run tests for every context alternative - for _, qctx := range contexts { - ctx := qctx.Context - for i, test := range tests { - cursor, err := db.Query(ctx, test.Query, test.BindVars) - if err == nil { - // Close upon exit of the function - defer cursor.Close() - } - if test.ExpectSuccess { - if err != nil { - t.Errorf("Expected success in query %d (%s), got '%s'", i, test.Query, err) - continue - } - count := cursor.Count() - if qctx.ExpectCount { - if count != int64(len(test.ExpectedDocuments)) { - t.Errorf("Expected count of %d, got %d in query %d (%s)", len(test.ExpectedDocuments), count, i, test.Query) - } - } else { - if count != 0 { - t.Errorf("Expected count of 0, got %d in query %d (%s)", count, i, test.Query) - } - } - var result []interface{} - for { - hasMore := cursor.HasMore() - doc := reflect.New(test.DocumentType) - if _, err := cursor.ReadDocument(ctx, doc.Interface()); driver.IsNoMoreDocuments(err) { - if hasMore { - t.Error("HasMore returned true, but ReadDocument returns a IsNoMoreDocuments error") - } - break - } else if err != nil { - 
t.Errorf("Failed to result document %d: %s", len(result), err) - } - if !hasMore { - t.Error("HasMore returned false, but ReadDocument returns a document") - } - result = append(result, doc.Elem().Interface()) - } - if len(result) != len(test.ExpectedDocuments) { - t.Errorf("Expected %d documents, got %d in query %d (%s)", len(test.ExpectedDocuments), len(result), i, test.Query) - } else { - for resultIdx, resultDoc := range result { - if !reflect.DeepEqual(resultDoc, test.ExpectedDocuments[resultIdx]) { - t.Errorf("Unexpected document in query %d (%s) at index %d: got %+v, expected %+v", i, test.Query, resultIdx, resultDoc, test.ExpectedDocuments[resultIdx]) - } - } - } - // Close anyway (this tests calling Close more than once) - if err := cursor.Close(); err != nil { - t.Errorf("Expected success in Close of cursor from query %d (%s), got '%s'", i, test.Query, err) - } - } else { - if err == nil { - t.Errorf("Expected error in query %d (%s), got '%s'", i, test.Query, err) - continue - } - } - } - } - -} diff --git a/tests/db_util.go b/tests/db_util.go deleted file mode 100644 index dde28f9d5..000000000 --- a/tests/db_util.go +++ /dev/null @@ -1,64 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package tests - -import ( - "context" - "testing" - - driver "github.com/arangodb/go-driver" -) - -// ensureDatabase is a helper to check if a database exists and create it if needed. -// It will fail the test when an error occurs. -func ensureDatabase(ctx context.Context, c driver.Client, name string, options *driver.CreateDatabaseOptions, t *testing.T) driver.Database { - db, err := c.Database(ctx, name) - if driver.IsNotFound(err) { - db, err = c.CreateDatabase(ctx, name, options) - if err != nil { - if driver.IsConflict(err) { - t.Fatalf("Failed to create database (conflict) '%s': %s %#v", name, err, err) - } else { - t.Fatalf("Failed to create database '%s': %s %#v", name, err, err) - } - } - } else if err != nil { - t.Fatalf("Failed to open database '%s': %s", name, err) - } - return db -} - -// ensureCollection is a helper to check if a collection exists and create if if needed. -// It will fail the test when an error occurs. 
-func ensureCollection(ctx context.Context, db driver.Database, name string, options *driver.CreateCollectionOptions, t *testing.T) driver.Collection { - c, err := db.Collection(ctx, name) - if driver.IsNotFound(err) { - c, err = db.CreateCollection(ctx, name, options) - if err != nil { - t.Fatalf("Failed to create collection '%s': %s", name, err) - } - } else if err != nil { - t.Fatalf("Failed to open collection '%s': %s", name, err) - } - return c -} diff --git a/tests/deployments_test.go b/tests/deployments_test.go deleted file mode 100644 index c249fb05d..000000000 --- a/tests/deployments_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Jan Christoph Uhde -// -package tests - -import ( - "context" - "fmt" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/dchest/uniuri" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - driver "github.com/arangodb/go-driver" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client" -) - -// test deployment single server mmfiles -func TestDeploymentSingleMMFiles(t *testing.T) { - deploymentSubTest(t, api.DeploymentModeSingle, api.StorageEngineMMFiles) -} - -// test deployment single server rocksdb -func TestDeploymentSingleRocksDB(t *testing.T) { - deploymentSubTest(t, api.DeploymentModeSingle, api.StorageEngineRocksDB) -} - -// test deployment active-failover server mmfiles -func TestDeploymentActiveFailoverMMFiles(t *testing.T) { - deploymentSubTest(t, api.DeploymentModeActiveFailover, api.StorageEngineMMFiles) -} - -// test deployment active-failover server rocksdb -func TestDeploymentActiveFailoverRocksDB(t *testing.T) { - deploymentSubTest(t, api.DeploymentModeActiveFailover, api.StorageEngineRocksDB) -} - -// test deployment cluster mmfiles -func TestDeploymentClusterMMFiles(t *testing.T) { - deploymentSubTest(t, api.DeploymentModeCluster, api.StorageEngineMMFiles) -} - -// test deployment cluster rocksdb -func TestDeploymentClusterRocksDB(t *testing.T) { - deploymentSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB) -} - -func deploymentSubTest(t *testing.T, mode api.DeploymentMode, engine api.StorageEngine) { - // check environment - longOrSkip(t) - - ns := getNamespace(t) - kubecli := mustNewKubeClient(t) - c := kubeArangoClient.MustNewClient() - - // Prepare deployment config - depl := newDeployment("test-deployment-" + string(mode) + "-" + string(engine) + "-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(mode) - depl.Spec.StorageEngine = api.NewStorageEngine(engine) - depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, 
metav1.CreateOptions{}) - require.NoError(t, err, fmt.Sprintf("Create deployment failed: %v", err)) - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - deployment, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - require.NoError(t, err, fmt.Sprintf("Deployment not running in time: %v", err)) - - // Create a database client - ctx := context.Background() - DBClient := mustNewArangodDatabaseClient(ctx, kubecli, deployment, t, nil) - require.NoError(t, waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, ""), fmt.Sprintf("Deployment not healthy in time: %v", err)) - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} - -// test a setup containing multiple deployments -func TestMultiDeployment(t *testing.T) { - longOrSkip(t) - - ns := getNamespace(t) - kubecli := mustNewKubeClient(t) - c := kubeArangoClient.MustNewClient() - - // Prepare deployment configurations - depl1 := newDeployment("test-multidep-1-" + uniuri.NewLen(4)) - depl1.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl1.Spec.StorageEngine = api.NewStorageEngine(api.StorageEngineRocksDB) - depl1.Spec.TLS = api.TLSSpec{} // should auto-generate cert - depl1.Spec.SetDefaults(depl1.GetName()) // this must be last - - depl2 := newDeployment("test-multidep-2-" + uniuri.NewLen(4)) - depl2.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl2.Spec.StorageEngine = api.NewStorageEngine(api.StorageEngineMMFiles) - depl2.Spec.TLS = api.TLSSpec{} // should auto-generate cert - depl2.Spec.SetDefaults(depl2.GetName()) // this must be last - - // Create deployments - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl1, metav1.CreateOptions{}) - require.NoError(t, err, fmt.Sprintf("Deployment creation failed: %v", err)) - defer deferedCleanupDeployment(c, depl1.GetName(), ns) - - _, err = c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl2, metav1.CreateOptions{}) - require.NoError(t, err, fmt.Sprintf("Deployment creation failed: %v", err)) - defer deferedCleanupDeployment(c, depl2.GetName(), ns) - - // Wait for deployments to be ready - deployment1, err := waitUntilDeployment(c, depl1.GetName(), ns, deploymentIsReady()) - require.NoError(t, err, fmt.Sprintf("Deployment not running in time: %v", err)) - - deployment2, err := waitUntilDeployment(c, depl2.GetName(), ns, deploymentIsReady()) - require.NoError(t, err, fmt.Sprintf("Deployment not running in time: %v", err)) - - require.True(t, deployment1 != nil && deployment2 != nil, "deployment is nil") - - // Create database clients - ctx := context.Background() - DBClient1 := mustNewArangodDatabaseClient(ctx, kubecli, deployment1, t, nil) - require.NoError(t, waitUntilArangoDeploymentHealthy(deployment1, DBClient1, kubecli, ""), fmt.Sprintf("Deployment not healthy in time: %v", err)) - DBClient2 := mustNewArangodDatabaseClient(ctx, kubecli, deployment2, t, nil) - require.NoError(t, waitUntilArangoDeploymentHealthy(deployment2, DBClient2, kubecli, ""), fmt.Sprintf("Deployment not healthy in time: %v", err)) - - // Test if we are able to create a collection in each deployment.
- db1, err := DBClient1.Database(ctx, "_system") - require.NoError(t, err, "failed to get database") - _, err = db1.CreateCollection(ctx, "col1", nil) - require.NoError(t, err, "failed to create collection") - - db2, err := DBClient2.Database(ctx, "_system") - require.NoError(t, err, "failed to get database") - _, err = db2.CreateCollection(ctx, "col2", nil) - require.NoError(t, err, "failed to create collection") - - // Each newly created collection must be visible only in the deployment - // it was created in. The following lines ensure this behavior. - collections1, err := db1.Collections(ctx) - require.NoError(t, err, "failed to get collections") - collections2, err := db2.Collections(ctx) - require.NoError(t, err, "failed to get collections") - - assert.True(t, containsCollection(collections1, "col1"), "collection missing") - assert.True(t, containsCollection(collections2, "col2"), "collection missing") - assert.False(t, containsCollection(collections1, "col2"), "collection must not be in this deployment") - assert.False(t, containsCollection(collections2, "col1"), "collection must not be in this deployment") - - // Cleanup - removeDeployment(c, depl1.GetName(), ns) - removeDeployment(c, depl2.GetName(), ns) - -} - -func containsCollection(colls []driver.Collection, name string) bool { - for _, col := range colls { - if name == col.Name() { - return true - } - } - return false -} diff --git a/tests/duration/Dockerfile b/tests/duration/Dockerfile deleted file mode 100644 index b00043c4f..000000000 --- a/tests/duration/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM scratch - -ADD bin/arangodb_operator_duration_test /usr/bin/ - -ENTRYPOINT [ "/usr/bin/arangodb_operator_duration_test" ] \ No newline at end of file diff --git a/tests/duration/README.md b/tests/duration/README.md deleted file mode 100644 index 79f9e1024..000000000 --- a/tests/duration/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Kube-ArangoDB duration test - -This test is a simple application that keeps accessing the database with various requests. - -## Building - -In the root of the kube-arangodb repository, run: - -```bash -make docker-duration-test -``` - -## Running - -Start an ArangoDB `Cluster` deployment. - -Run: - -```bash -kubectl run \ - --image=${DOCKERNAMESPACE}/kube-arangodb-durationtest:dev \ - --image-pull-policy=Always duration-test \ - -- \ - --cluster=https://..svc:8529 \ - --username=root -``` - -To remove the test, run: - -```bash -kubectl delete deployment/duration-test -``` diff --git a/tests/duration/main.go b/tests/duration/main.go deleted file mode 100644 index d41c034c3..000000000 --- a/tests/duration/main.go +++ /dev/null @@ -1,132 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package main - -import ( - "context" - "crypto/tls" - "flag" - "fmt" - "log" - "os" - "os/signal" - "strings" - "syscall" - "time" - - driver "github.com/arangodb/go-driver" - "github.com/arangodb/go-driver/http" - "github.com/pkg/errors" - - "github.com/arangodb/kube-arangodb/pkg/util/retry" -) - -const ( - defaultTestDuration = time.Hour * 24 * 7 // 7 days -) - -var ( - maskAny = errors.WithStack - userName string - password string - clusterEndpoints string - testDuration time.Duration -) - -func init() { - flag.StringVar(&userName, "username", "", "Authenticating username") - flag.StringVar(&password, "password", "", "Authenticating password") - flag.StringVar(&clusterEndpoints, "cluster", "", "Endpoints for database cluster") - flag.DurationVar(&testDuration, "duration", defaultTestDuration, "Duration of the test") -} - -func main() { - flag.Parse() - - // Create clients & wait for cluster available - client, err := createClusterClient(clusterEndpoints, userName, password) - if err != nil { - log.Fatalf("Failed to create cluster client: %#v\n", err) - } - if err := waitUntilClusterUp(client); err != nil { - log.Fatalf("Failed to reach cluster: %#v\n", err) - } - - // Start running tests - ctx, cancel := context.WithCancel(context.Background()) - sigChannel := make(chan os.Signal, 1) - signal.Notify(sigChannel, os.Interrupt, syscall.SIGTERM) - go handleSignal(sigChannel, cancel) - runTestLoop(ctx, client, testDuration) -} - -// createClusterClient creates a configuration, connection and client for -// the ArangoDB cluster under test. It uses the go-driver. -// It needs a list of endpoints. -func createClusterClient(endpoints string, user string, password string) (driver.Client, error) { - // This will always use HTTP, and user and password authentication - config := http.ConnectionConfig{ - Endpoints: strings.Split(endpoints, ","), - TLSConfig: &tls.Config{InsecureSkipVerify: true}, - } - connection, err := http.NewConnection(config) - if err != nil { - return nil, maskAny(err) - } - clientCfg := driver.ClientConfig{ - Connection: connection, - Authentication: driver.BasicAuthentication(user, password), - } - client, err := driver.NewClient(clientCfg) - if err != nil { - return nil, maskAny(err) - } - return client, nil -} - -func waitUntilClusterUp(c driver.Client) error { - op := func() error { - ctx := context.Background() - if _, err := c.Version(ctx); err != nil { - return maskAny(err) - } - return nil - } - if err := retry.Retry(op, time.Minute); err != nil { - return maskAny(err) - } - return nil -} - -// handleSignal listens for termination signals and stops this process on termination. -func handleSignal(sigChannel chan os.Signal, cancel context.CancelFunc) { - signalCount := 0 - for s := range sigChannel { - signalCount++ - fmt.Println("Received signal:", s) - if signalCount > 1 { - os.Exit(1) - } - cancel() - } -} diff --git a/tests/duration/simple/error.go b/tests/duration/simple/error.go deleted file mode 100644 index f40dc6621..000000000 --- a/tests/duration/simple/error.go +++ /dev/null @@ -1,31 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "github.com/pkg/errors" -) - -var ( - maskAny = errors.WithStack -) diff --git a/tests/duration/simple/simple.go b/tests/duration/simple/simple.go deleted file mode 100644 index a00a9f5ae..000000000 --- a/tests/duration/simple/simple.go +++ /dev/null @@ -1,679 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - "io" - "math/rand" - "os" - "sort" - "sync" - "sync/atomic" - "time" - - driver "github.com/arangodb/go-driver" - "github.com/pkg/errors" - "github.com/rs/zerolog" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -type SimpleConfig struct { - MaxDocuments int - MaxCollections int -} - -const ( - initialDocumentCount = 999 -) - -type simpleTest struct { - SimpleConfig - activeMutex sync.Mutex - logPath string - reportDir string - log zerolog.Logger - listener test.TestListener - stop chan struct{} - active bool - pauseRequested bool - paused bool - client driver.Client - db driver.Database - failures int - actions int - collections map[string]*collection - collectionsMutex sync.Mutex - lastCollectionIndex int32 - readExistingCounter counter - readExistingWrongRevisionCounter counter - readNonExistingCounter counter - createCounter counter - createCollectionCounter counter - removeExistingCollectionCounter counter - updateExistingCounter counter - updateExistingWrongRevisionCounter counter - updateNonExistingCounter counter - replaceExistingCounter counter - replaceExistingWrongRevisionCounter counter - replaceNonExistingCounter counter - deleteExistingCounter counter - deleteExistingWrongRevisionCounter counter - deleteNonExistingCounter counter - importCounter counter - queryCreateCursorCounter counter - queryNextBatchCounter counter - queryNextBatchNewCoordinatorCounter counter - queryLongRunningCounter counter - rebalanceShardsCounter counter - queryUpdateCounter counter - queryUpdateLongRunningCounter counter -} - -type counter struct { - succeeded int - failed int -} - -type collection struct { - name string - existingDocs map[string]UserDocument -} - -// NewSimpleTest creates a simple test -func NewSimpleTest(log zerolog.Logger, reportDir string, config SimpleConfig) test.TestScript { - return &simpleTest{ - SimpleConfig: config, - reportDir: reportDir, - log: log, - collections: 
make(map[string]*collection), - } -} - -// Name returns the name of the script -func (t *simpleTest) Name() string { - return "simple" -} - -// Start triggers the test script to start. -// It should spawn actions in a goroutine. -func (t *simpleTest) Start(client driver.Client, listener test.TestListener) error { - t.activeMutex.Lock() - defer t.activeMutex.Unlock() - - if t.active { - // No restart unless needed - return nil - } - - t.listener = listener - t.client = client - ctx := context.Background() - db, err := client.Database(ctx, "_system") - if err != nil { - return maskAny(err) - } - t.db = db - - // Cleanup of old data - for i := 1; i <= t.MaxCollections; i++ { - col, err := db.Collection(ctx, t.getCollectionName(i)) - if err == nil { - if err := col.Remove(ctx); err != nil { - return errors.Wrapf(err, "Failed to remove collection %s", col.Name()) - } - } else if !driver.IsNotFound(err) { - return maskAny(err) - } - } - - t.active = true - go t.testLoop() - return nil -} - -// Stop any running test. This should not return until tests are actually stopped. -func (t *simpleTest) Stop() error { - t.activeMutex.Lock() - defer t.activeMutex.Unlock() - - if !t.active { - // Not active, nothing to stop - return nil - } - - stop := make(chan struct{}) - t.stop = stop - <-stop - return nil -} - -// Interrupt the tests, but be prepared to continue. -func (t *simpleTest) Pause() error { - t.pauseRequested = true - return nil -} - -// Resume running the tests, where Pause interrupted it. -func (t *simpleTest) Resume() error { - t.pauseRequested = false - return nil -} - -// Status returns the current status of the test -func (t *simpleTest) Status() test.TestStatus { - cc := func(name string, c counter) test.Counter { - return test.Counter{ - Name: name, - Succeeded: c.succeeded, - Failed: c.failed, - } - } - - status := test.TestStatus{ - Active: t.active && !t.paused, - Pausing: t.pauseRequested && t.paused, - Failures: t.failures, - Actions: t.actions, - Counters: []test.Counter{ - cc("#collections created", t.createCollectionCounter), - cc("#collections removed", t.removeExistingCollectionCounter), - cc("#documents created", t.createCounter), - cc("#existing documents read", t.readExistingCounter), - cc("#existing documents updated", t.updateExistingCounter), - cc("#existing documents replaced", t.replaceExistingCounter), - cc("#existing documents removed", t.deleteExistingCounter), - cc("#existing documents wrong revision read", t.readExistingWrongRevisionCounter), - cc("#existing documents wrong revision updated", t.updateExistingWrongRevisionCounter), - cc("#existing documents wrong revision replaced", t.replaceExistingWrongRevisionCounter), - cc("#existing documents wrong revision removed", t.deleteExistingWrongRevisionCounter), - cc("#non-existing documents read", t.readNonExistingCounter), - cc("#non-existing documents updated", t.updateNonExistingCounter), - cc("#non-existing documents replaced", t.replaceNonExistingCounter), - cc("#non-existing documents removed", t.deleteNonExistingCounter), - cc("#import operations", t.importCounter), - cc("#create AQL cursor operations", t.queryCreateCursorCounter), - cc("#fetch next AQL cursor batch operations", t.queryNextBatchCounter), - cc("#fetch next AQL cursor batch after coordinator change operations", t.queryNextBatchNewCoordinatorCounter), - cc("#long running AQL query operations", t.queryLongRunningCounter), - cc("#rebalance shards operations", t.rebalanceShardsCounter), - cc("#update AQL query operations", t.queryUpdateCounter), -
cc("#long running update AQL query operations", t.queryUpdateLongRunningCounter), - }, - } - - t.collectionsMutex.Lock() - for _, c := range t.collections { - status.Messages = append(status.Messages, - fmt.Sprintf("Current #documents in %s: %d", c.name, len(c.existingDocs)), - ) - } - t.collectionsMutex.Unlock() - - return status -} - -// CollectLogs copies all logging info to the given writer. -func (t *simpleTest) CollectLogs(w io.Writer) error { - if logPath := t.logPath; logPath == "" { - // Nothing to log yet - return nil - } else { - rd, err := os.Open(logPath) - if err != nil { - return maskAny(err) - } - defer rd.Close() - if _, err := io.Copy(w, rd); err != nil { - return maskAny(err) - } - return nil - } -} - -func (t *simpleTest) shouldStop() bool { - // Should we stop? - if stop := t.stop; stop != nil { - stop <- struct{}{} - return true - } - return false -} - -type UserDocument struct { - Key string `json:"_key"` - rev string // Note that we do not export this field! - Value int `json:"value"` - Name string `json:"name"` - Odd bool `json:"odd"` -} - -// Equals returns true when the value fields of `d` and `other` are the equal. -func (d UserDocument) Equals(other UserDocument) bool { - return d.Value == other.Value && - d.Name == other.Name && - d.Odd == other.Odd -} - -func (t *simpleTest) reportFailure(f test.Failure) { - t.failures++ - t.listener.ReportFailure(f) -} - -func (t *simpleTest) testLoop() { - t.active = true - t.actions = 0 - defer func() { t.active = false }() - - if err := t.createAndInitCollection(); err != nil { - t.log.Error().Msgf("Failed to create&init first collection: %v. Giving up", err) - return - } - - var plan []int - planIndex := 0 - for { - // Should we stop - if t.shouldStop() { - return - } - if t.pauseRequested { - t.paused = true - time.Sleep(time.Second * 2) - continue - } - t.paused = false - t.actions++ - if plan == nil || planIndex >= len(plan) { - plan = createTestPlan(20) // Update when more tests are added - planIndex = 0 - } - - switch plan[planIndex] { - case 0: - // Create collection with initial data - if len(t.collections) < t.MaxCollections && rand.Intn(100)%2 == 0 { - if err := t.createAndInitCollection(); err != nil { - t.log.Error().Msgf("Failed to create&init collection: %v", err) - } - } - planIndex++ - - case 1: - // Remove an existing collection - if len(t.collections) > 1 && rand.Intn(100)%2 == 0 { - c := t.selectRandomCollection() - if err := t.removeExistingCollection(c); err != nil { - t.log.Error().Msgf("Failed to remove existing collection: %#v", err) - } - } - planIndex++ - - case 2: - // Create a random document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) < t.MaxDocuments { - userDoc := UserDocument{ - Key: c.createNewKey(true), - Value: rand.Int(), - Name: fmt.Sprintf("User %d", time.Now().Nanosecond()), - Odd: time.Now().Nanosecond()%2 == 1, - } - if rev, err := t.createDocument(c, userDoc, userDoc.Key); err != nil { - t.log.Error().Msgf("Failed to create document: %#v", err) - } else { - userDoc.rev = rev - c.existingDocs[userDoc.Key] = userDoc - - // Now try to read it, it must exist - //t.client.SetCoordinator("") - if err := t.readExistingDocument(c, userDoc.Key, false); err != nil { - t.log.Error().Msgf("Failed to read just-created document '%s': %#v", userDoc.Key, err) - } - } - } - } - planIndex++ - - case 3: - // Read a random existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) > 0 { - randomKey := 
c.selectRandomKey() - if err := t.readExistingDocument(c, randomKey, false); err != nil { - t.log.Error().Msgf("Failed to read existing document '%s': %#v", randomKey, err) - } - } - } - planIndex++ - - case 4: - // Read a random existing document but with wrong revision - planIndex++ - - case 5: - // Read a random non-existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - randomKey := c.createNewKey(false) - if err := t.readNonExistingDocument(c.name, randomKey); err != nil { - t.log.Error().Msgf("Failed to read non-existing document '%s': %#v", randomKey, err) - } - } - planIndex++ - - case 6: - // Remove a random existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) > 0 { - randomKey := c.selectRandomKey() - if err := t.removeExistingDocument(c.name, randomKey); err != nil { - t.log.Error().Msgf("Failed to remove existing document '%s': %#v", randomKey, err) - } else { - // Remove succeeded, key should no longer exist - c.removeExistingKey(randomKey) - - // Now try to read it, it should not exist - //t.client.SetCoordinator("") - if err := t.readNonExistingDocument(c.name, randomKey); err != nil { - t.log.Error().Msgf("Failed to read just-removed document '%s': %#v", randomKey, err) - } - } - } - } - planIndex++ - - case 7: - // Remove a random existing document but with wrong revision - planIndex++ - - case 8: - // Remove a random non-existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - randomKey := c.createNewKey(false) - if err := t.removeNonExistingDocument(c.name, randomKey); err != nil { - t.log.Error().Msgf("Failed to remove non-existing document '%s': %#v", randomKey, err) - } - } - planIndex++ - - case 9: - // Update a random existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) > 0 { - randomKey := c.selectRandomKey() - if _, err := t.updateExistingDocument(c, randomKey); err != nil { - t.log.Error().Msgf("Failed to update existing document '%s': %#v", randomKey, err) - } else { - // Update succeeded, now try to read it, it should exist and be updated - //t.client.SetCoordinator("") - if err := t.readExistingDocument(c, randomKey, false); err != nil { - t.log.Error().Msgf("Failed to read just-updated document '%s': %#v", randomKey, err) - } - } - } - } - planIndex++ - - case 10: - // Update a random existing document but with wrong revision - planIndex++ - - case 11: - // Update a random non-existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - randomKey := c.createNewKey(false) - if err := t.updateNonExistingDocument(c.name, randomKey); err != nil { - t.log.Error().Msgf("Failed to update non-existing document '%s': %#v", randomKey, err) - } - } - planIndex++ - - case 12: - // Replace a random existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) > 0 { - randomKey := c.selectRandomKey() - if _, err := t.replaceExistingDocument(c, randomKey); err != nil { - t.log.Error().Msgf("Failed to replace existing document '%s': %#v", randomKey, err) - } else { - // Replace succeeded, now try to read it, it should exist and be replaced - //t.client.SetCoordinator("") - if err := t.readExistingDocument(c, randomKey, false); err != nil { - t.log.Error().Msgf("Failed to read just-replaced document '%s': %#v", randomKey, err) - } - } - } - } - planIndex++ - - case 13: - // Replace a random existing document but with wrong revision - planIndex++ -
- case 14: - // Replace a random non-existing document - if len(t.collections) > 0 { - c := t.selectRandomCollection() - randomKey := c.createNewKey(false) - if err := t.replaceNonExistingDocument(c.name, randomKey); err != nil { - t.log.Error().Msgf("Failed to replace non-existing document '%s': %#v", randomKey, err) - } - } - planIndex++ - - case 15: - // Query documents - planIndex++ - - case 16: - // Query documents (long running) - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if err := t.queryDocumentsLongRunning(c); err != nil { - t.log.Error().Msgf("Failed to query (long running) documents: %#v", err) - } - } - planIndex++ - - case 17: - // Rebalance shards - if err := t.rebalanceShards(); err != nil { - t.log.Error().Msgf("Failed to rebalance shards: %#v", err) - } - planIndex++ - - case 18: - // AQL update query - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) > 0 { - randomKey := c.selectRandomKey() - if _, err := t.queryUpdateDocuments(c, randomKey); err != nil { - t.log.Error().Msgf("Failed to update document using AQL query: %#v", err) - } else { - // Update succeeded, now try to read it (anywhere), it should exist and be updated - //t.client.SetCoordinator("") - if err := t.readExistingDocument(c, randomKey, false); err != nil { - t.log.Error().Msgf("Failed to read just-updated document '%s': %#v", randomKey, err) - } - } - } - } - planIndex++ - - case 19: - // Long running AQL update query - if len(t.collections) > 0 { - c := t.selectRandomCollection() - if len(c.existingDocs) > 0 { - randomKey := c.selectRandomKey() - if _, err := t.queryUpdateDocumentsLongRunning(c, randomKey); err != nil { - t.log.Error().Msgf("Failed to update document using long running AQL query: %#v", err) - } else { - // Update succeeded, now try to read it (anywhere), it should exist and be updated - //t.client.SetCoordinator("") - if err := t.readExistingDocument(c, randomKey, false); err != nil { - t.log.Error().Msgf("Failed to read just-updated document '%s': %#v", randomKey, err) - } - } - } - } - planIndex++ - } - time.Sleep(time.Second * 2) - } -} -
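The loop above draws its next action from a pre-shuffled plan rather than sampling actions independently, so every action runs exactly once per cycle of the plan. A minimal standalone sketch of the same idea, using the standard library's `rand.Perm` in place of the `createTestPlan`/`Shuffle` helpers defined in this file:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// rand.Perm returns a random permutation of 0..19, equivalent to what
	// createTestPlan builds with its explicit shuffle below.
	plan := rand.Perm(20)
	for _, action := range plan {
		// Each of the 20 actions is visited exactly once per cycle.
		fmt.Println("executing action", action)
	}
}
```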
-// createTestPlan creates an int array of length 'steps' with all values from 0..steps-1 in random order. -func createTestPlan(steps int) []int { - plan := make([]int, steps) - for i := 0; i < steps; i++ { - plan[i] = i - } - test.Shuffle(sort.IntSlice(plan)) - return plan -} - -// createNewCollectionName returns a new (unique) collection name -func (t *simpleTest) createNewCollectionName() string { - index := atomic.AddInt32(&t.lastCollectionIndex, 1) - return t.getCollectionName(int(index)) -} - -// getCollectionName returns a collection name with given index -func (t *simpleTest) getCollectionName(index int) string { - return fmt.Sprintf("simple_user_%d", index) -} - -func (t *simpleTest) selectRandomCollection() *collection { - index := rand.Intn(len(t.collections)) - for _, c := range t.collections { - if index == 0 { - return c - } - index-- - } - return nil // This should never be reached when len(t.collections) > 0 -} - -func (t *simpleTest) registerCollection(c *collection) { - t.collectionsMutex.Lock() - defer t.collectionsMutex.Unlock() - t.collections[c.name] = c -} - -func (t *simpleTest) unregisterCollection(c *collection) { - t.collectionsMutex.Lock() - defer t.collectionsMutex.Unlock() - delete(t.collections, c.name) -} - -func (t *simpleTest) createAndInitCollection() error { - c := &collection{ - name: t.createNewCollectionName(), - existingDocs: make(map[string]UserDocument), - } - if err := t.createCollection(c, 9, 2); err != nil { - t.reportFailure(test.NewFailure("Creating collection '%s' failed: %v", c.name, err)) - return maskAny(err) - } - t.registerCollection(c) - t.createCollectionCounter.succeeded++ - t.actions++ - - // Import documents - if err := t.importDocuments(c); err != nil { - t.reportFailure(test.NewFailure("Failed to import documents: %#v", err)) - } - t.actions++ - - // Check imported documents - for k := range c.existingDocs { - if t.shouldStop() || t.pauseRequested { - return nil - } - if err := t.readExistingDocument(c, k, true); err != nil { - t.reportFailure(test.NewFailure("Failed to read existing document '%s': %#v", k, err)) - } - t.actions++ - } - - // Create sample users - for i := 0; i < initialDocumentCount; i++ { - if t.shouldStop() || t.pauseRequested { - return nil - } - userDoc := UserDocument{ - Key: fmt.Sprintf("doc%05d", i), - Value: i, - Name: fmt.Sprintf("User %d", i), - Odd: i%2 == 1, - } - if rev, err := t.createDocument(c, userDoc, userDoc.Key); err != nil { - t.reportFailure(test.NewFailure("Failed to create document: %#v", err)) - } else { - userDoc.rev = rev - c.existingDocs[userDoc.Key] = userDoc - } - t.actions++ - } - return nil -} - -func (c *collection) createNewKey(record bool) string { - for { - key := fmt.Sprintf("newkey%07d", rand.Int31n(100*1000)) - _, found := c.existingDocs[key] - if !found { - if record { - c.existingDocs[key] = UserDocument{} - } - return key - } - } -} - -func (c *collection) removeExistingKey(key string) { - delete(c.existingDocs, key) -} - -func (c *collection) selectRandomKey() string { - index := rand.Intn(len(c.existingDocs)) - for k := range c.existingDocs { - if index == 0 { - return k - } - index-- - } - return "" // This should never be reached when len(c.existingDocs) > 0 -} diff --git a/tests/duration/simple/simple_collection.go b/tests/duration/simple/simple_collection.go deleted file mode 100644 index 123d6d6b1..000000000 --- a/tests/duration/simple/simple_collection.go +++ /dev/null @@ -1,94 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// createCollection creates a new collection. -// The operation is expected to succeed. -func (t *simpleTest) createCollection(c *collection, numberOfShards, replicationFactor int) error { - ctx := context.Background() - opts := &driver.CreateCollectionOptions{ - NumberOfShards: numberOfShards, - ReplicationFactor: replicationFactor, - } - t.log.Info().Msgf("Creating collection '%s' with numberOfShards=%d, replicationFactor=%d...", c.name, numberOfShards, replicationFactor) - if _, err := t.db.CreateCollection(ctx, c.name, opts); driver.IsConflict(err) { - // Duplicate name, check if that is correct - if exists, checkErr := t.collectionExists(c); checkErr != nil { - t.log.Error().Msgf("Failed to check if collection exists: %v", checkErr) - t.reportFailure(test.NewFailure("Failed to create collection '%s': %v and cannot check existence: %v", c.name, err, checkErr)) - return maskAny(err) - } else if !exists { - // Collection has not been created, so 409 status is really wrong - t.reportFailure(test.NewFailure("Failed to create collection '%s': 409 reported but collection does not exist", c.name)) - return maskAny(fmt.Errorf("Create collection reported 409, but collection does not exist")) - } - } else if err != nil { - // This is a failure - t.reportFailure(test.NewFailure("Failed to create collection '%s': %v", c.name, err)) - return maskAny(err) - } - t.log.Info().Msgf("Creating collection '%s' with numberOfShards=%d, replicationFactor=%d succeeded", c.name, numberOfShards, replicationFactor) - return nil -} - -// removeExistingCollection removes an existing collection. -// The operation is expected to succeed. -func (t *simpleTest) removeExistingCollection(c *collection) error { - ctx := context.Background() - t.log.Info().Msgf("Removing collection '%s'...", c.name) - col, err := t.db.Collection(ctx, c.name) - if err != nil { - return maskAny(err) - } - if err := col.Remove(ctx); err != nil { - // This is a failure - t.removeExistingCollectionCounter.failed++ - t.reportFailure(test.NewFailure("Failed to remove collection '%s': %v", c.name, err)) - return maskAny(err) - } - t.removeExistingCollectionCounter.succeeded++ - t.log.Info().Msgf("Removing collection '%s' succeeded", c.name) - t.unregisterCollection(c) - return nil -} -
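The conflict handling in `createCollection` above treats a 409 as acceptable only if the collection is then verifiably present. A condensed sketch of that same pattern as a hypothetical standalone helper (not part of the deleted file; it uses only go-driver calls that appear above):

```go
package main

import (
	"context"
	"fmt"

	driver "github.com/arangodb/go-driver"
)

// getOrCreateCollection creates a collection, tolerating a concurrent creator:
// a conflict is accepted only when the collection actually exists afterwards.
func getOrCreateCollection(ctx context.Context, db driver.Database, name string) (driver.Collection, error) {
	col, err := db.CreateCollection(ctx, name, nil)
	if driver.IsConflict(err) {
		exists, checkErr := db.CollectionExists(ctx, name)
		if checkErr != nil {
			return nil, fmt.Errorf("got 409 for %q but cannot verify existence: %w", name, checkErr)
		}
		if !exists {
			return nil, fmt.Errorf("got 409 for %q but collection does not exist", name)
		}
		return db.Collection(ctx, name) // someone else created it; reuse it
	}
	return col, err
}
```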
-// collectionExists tries to fetch information about the collection to see if it exists. -func (t *simpleTest) collectionExists(c *collection) (bool, error) { - ctx := context.Background() - t.log.Info().Msgf("Checking collection '%s'...", c.name) - if found, err := t.db.CollectionExists(ctx, c.name); err != nil { - // This is a failure - return false, maskAny(err) - } else { - return found, nil - } -} diff --git a/tests/duration/simple/simple_create.go b/tests/duration/simple/simple_create.go deleted file mode 100644 index c55548db7..000000000 --- a/tests/duration/simple/simple_create.go +++ /dev/null @@ -1,50 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// createDocument creates a new document. -// The operation is expected to succeed. -func (t *simpleTest) createDocument(c *collection, document interface{}, key string) (string, error) { - ctx := context.Background() - col, err := t.db.Collection(ctx, c.name) - if err != nil { - return "", maskAny(err) - } - t.log.Info().Msgf("Creating document '%s' in '%s'...", key, c.name) - m, err := col.CreateDocument(ctx, document) - if err != nil { - // This is a failure - t.createCounter.failed++ - t.reportFailure(test.NewFailure("Failed to create document with key '%s' in collection '%s': %v", key, c.name, err)) - return "", maskAny(err) - } - t.createCounter.succeeded++ - t.log.Info().Msgf("Creating document '%s' in '%s' succeeded", key, c.name) - return m.Rev, nil -} diff --git a/tests/duration/simple/simple_import.go b/tests/duration/simple/simple_import.go deleted file mode 100644 index 4396de698..000000000 --- a/tests/duration/simple/simple_import.go +++ /dev/null @@ -1,79 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "bytes" - "context" - "fmt" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// createImportDocument creates a document import payload and the matching document set.
-func (t *simpleTest) createImportDocument() ([]byte, []UserDocument) { - buf := &bytes.Buffer{} - docs := make([]UserDocument, 0, 10000) - fmt.Fprintf(buf, `[ "_key", "value", "name", "odd" ]`) - fmt.Fprintln(buf) - for i := 0; i < 10000; i++ { - key := fmt.Sprintf("docimp%05d", i) - userDoc := UserDocument{ - Key: key, - Value: i, - Name: fmt.Sprintf("Imported %d", i), - Odd: i%2 == 1, - } - docs = append(docs, userDoc) - fmt.Fprintf(buf, `[ "%s", %d, "%s", %v ]`, userDoc.Key, userDoc.Value, userDoc.Name, userDoc.Odd) - fmt.Fprintln(buf) - } - return buf.Bytes(), docs -} - -// importDocuments imports a bulk set of documents. -// The operation is expected to succeed. -func (t *simpleTest) importDocuments(c *collection) error { - ctx := context.Background() - col, err := t.db.Collection(ctx, c.name) - if err != nil { - return maskAny(err) - } - _, docs := t.createImportDocument() - t.log.Info().Msgf("Importing %d documents ('%s' - '%s') into '%s'...", len(docs), docs[0].Key, docs[len(docs)-1].Key, c.name) - _, errs, err := col.CreateDocuments(ctx, docs) - if err != nil { - // This is a failure - t.importCounter.failed++ - t.reportFailure(test.NewFailure("Failed to import documents in collection '%s': %v", c.name, err)) - return maskAny(err) - } - for i, d := range docs { - if errs[i] == nil { - c.existingDocs[d.Key] = d - } - } - t.importCounter.succeeded++ - t.log.Info().Msgf("Importing %d documents ('%s' - '%s') into '%s' succeeded", len(docs), docs[0].Key, docs[len(docs)-1].Key, c.name) - return nil -}
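`CreateDocuments` reports one error per input document alongside the overall error, which is why the loop above inspects `errs[i]` individually. A hedged sketch (a hypothetical helper, not part of the deleted file) of extending that per-document handling so a re-run of the same import tolerates duplicate keys while still surfacing anything unexpected:

```go
package main

import (
	driver "github.com/arangodb/go-driver"
)

// UserDocument mirrors the struct defined in simple.go above.
type UserDocument struct {
	Key   string `json:"_key"`
	Value int    `json:"value"`
	Name  string `json:"name"`
	Odd   bool   `json:"odd"`
}

// mergeImportResults records freshly imported documents, skips documents that
// already exist (per-document conflict errors), and fails on anything else.
func mergeImportResults(docs []UserDocument, errs driver.ErrorSlice, existing map[string]UserDocument) error {
	for i, d := range docs {
		switch {
		case errs[i] == nil:
			existing[d.Key] = d // freshly imported
		case driver.IsConflict(errs[i]):
			// Key already present from an earlier run; keep the stored copy.
		default:
			return errs[i]
		}
	}
	return nil
}
```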
diff --git a/tests/duration/simple/simple_query.go b/tests/duration/simple/simple_query.go deleted file mode 100644 index 984b866c6..000000000 --- a/tests/duration/simple/simple_query.go +++ /dev/null @@ -1,66 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// queryDocumentsLongRunning runs a long running AQL query. -// The operation is expected to succeed. -func (t *simpleTest) queryDocumentsLongRunning(c *collection) error { - if len(c.existingDocs) < 10 { - t.log.Info().Msgf("Skipping query test, we need 10 or more documents") - return nil - } - - ctx := context.Background() - ctx = driver.WithQueryCount(ctx) - - t.log.Info().Msgf("Creating long running AQL query for '%s'...", c.name) - query := fmt.Sprintf("FOR d IN %s LIMIT 10 RETURN {d:d, s:SLEEP(2)}", c.name) - cursor, err := t.db.Query(ctx, query, nil) - if err != nil { - // This is a failure - t.queryLongRunningCounter.failed++ - t.reportFailure(test.NewFailure("Failed to create long running AQL cursor in collection '%s': %v", c.name, err)) - return maskAny(err) - } - resultCount := cursor.Count() - cursor.Close() - t.queryLongRunningCounter.succeeded++ - t.log.Info().Msgf("Creating long running AQL query for collection '%s' succeeded", c.name) - - // We should've fetched all documents, check result count - if resultCount != 10 { - t.reportFailure(test.NewFailure("Number of documents was %d, expected 10", resultCount)) - return maskAny(fmt.Errorf("Number of documents was %d, expected 10", resultCount)) - } - - return nil -}
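The AQL calls in this file build query strings with `fmt.Sprintf`. For keys and values, go-driver also accepts bind variables, which avoids quoting pitfalls; `@@col` binds a collection name while `@key`/`@name` bind plain values. A minimal sketch of the same update query in bind-variable form (a hypothetical variant, not the deleted code):

```go
package main

import (
	"context"

	driver "github.com/arangodb/go-driver"
)

// updateNameWithBindVars performs the UPDATE from queryUpdateDocuments, but
// passes key, name, and collection as parameters instead of formatting them
// into the query string.
func updateNameWithBindVars(ctx context.Context, db driver.Database, colName, key, name string) error {
	query := "UPDATE @key WITH { name: @name } IN @@col RETURN NEW"
	bindVars := map[string]interface{}{
		"@col": colName, // double @ in the query, single @ in the map key
		"key":  key,
		"name": name,
	}
	cursor, err := db.Query(ctx, query, bindVars)
	if err != nil {
		return err
	}
	return cursor.Close()
}
```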
diff --git a/tests/duration/simple/simple_query_update.go b/tests/duration/simple/simple_query_update.go deleted file mode 100644 index 4fd69eb88..000000000 --- a/tests/duration/simple/simple_query_update.go +++ /dev/null @@ -1,115 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - "time" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// queryUpdateDocuments runs an AQL update query. -// The operation is expected to succeed. -func (t *simpleTest) queryUpdateDocuments(c *collection, key string) (string, error) { - ctx := context.Background() - ctx = driver.WithQueryCount(ctx) - - t.log.Info().Msgf("Creating update AQL query for collection '%s'...", c.name) - newName := fmt.Sprintf("AQLUpdate name %s", time.Now()) - query := fmt.Sprintf("UPDATE \"%s\" WITH { name: \"%s\" } IN %s RETURN NEW", key, newName, c.name) - cursor, err := t.db.Query(ctx, query, nil) - if err != nil { - // This is a failure - t.queryUpdateCounter.failed++ - t.reportFailure(test.NewFailure("Failed to create update AQL cursor in collection '%s': %v", c.name, err)) - return "", maskAny(err) - } - var resultDocument UserDocument - m, err := cursor.ReadDocument(ctx, &resultDocument) - if err != nil { - // This is a failure - t.queryUpdateCounter.failed++ - t.reportFailure(test.NewFailure("Failed to read document from cursor in collection '%s': %v", c.name, err)) - return "", maskAny(err) - } - resultCount := cursor.Count() - cursor.Close() - if resultCount != 1 { - // This is a failure - t.queryUpdateCounter.failed++ - t.reportFailure(test.NewFailure("Failed to create update AQL cursor in collection '%s': expected 1 result, got %d", c.name, resultCount)) - return "", maskAny(fmt.Errorf("Number of documents was %d, expected 1", resultCount)) - } - - // Update document - c.existingDocs[key] = resultDocument - t.queryUpdateCounter.succeeded++ - t.log.Info().Msgf("Creating update AQL query for collection '%s' succeeded", c.name) - - return m.Rev, nil -} - -// queryUpdateDocumentsLongRunning runs a long running AQL update query. -// The operation is expected to succeed. -func (t *simpleTest) queryUpdateDocumentsLongRunning(c *collection, key string) (string, error) { - ctx := context.Background() - ctx = driver.WithQueryCount(ctx) - - t.log.Info().Msgf("Creating long running update AQL query for collection '%s'...", c.name) - newName := fmt.Sprintf("AQLLongRunningUpdate name %s", time.Now()) - query := fmt.Sprintf("UPDATE \"%s\" WITH { name: \"%s\", unknown: SLEEP(15) } IN %s RETURN NEW", key, newName, c.name) - cursor, err := t.db.Query(ctx, query, nil) - if err != nil { - // This is a failure - t.queryUpdateLongRunningCounter.failed++ - t.reportFailure(test.NewFailure("Failed to create long running update AQL cursor in collection '%s': %v", c.name, err)) - return "", maskAny(err) - } - var resultDocument UserDocument - m, err := cursor.ReadDocument(ctx, &resultDocument) - if err != nil { - // This is a failure - t.queryUpdateLongRunningCounter.failed++ - t.reportFailure(test.NewFailure("Failed to read document from cursor in collection '%s': %v", c.name, err)) - return "", maskAny(err) - } - resultCount := cursor.Count() - cursor.Close() - if resultCount != 1 { - // This is a failure - t.queryUpdateLongRunningCounter.failed++ - t.reportFailure(test.NewFailure("Failed to create long running update AQL cursor in collection '%s': expected 1 result, got %d", c.name, resultCount)) - return "", maskAny(fmt.Errorf("Number of documents was %d, expected 1", resultCount)) - } - - // Update document - c.existingDocs[key] = resultDocument - t.queryUpdateLongRunningCounter.succeeded++ - t.log.Info().Msgf("Creating long running update AQL query for collection '%s' succeeded", c.name) - - return m.Rev, nil -} diff --git a/tests/duration/simple/simple_read.go b/tests/duration/simple/simple_read.go deleted file mode 100644 index 12a0cc4a7..000000000 --- a/tests/duration/simple/simple_read.go +++ /dev/null @@ -1,88 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB
GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// readExistingDocument reads an existing document and optionally records its latest revision. -// The operation is expected to succeed. -func (t *simpleTest) readExistingDocument(c *collection, key string, updateRevision bool) error { - ctx := context.Background() - var result UserDocument - col, err := t.db.Collection(ctx, c.name) - if err != nil { - return maskAny(err) - } - _, err = col.ReadDocument(ctx, key, &result) - if err != nil { - // This is a failure - t.readExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to read existing document '%s' in collection '%s': %v", key, c.name, err)) - return maskAny(err) - } - // Compare document against expected document - - expected := c.existingDocs[key] - if result.Value != expected.Value || result.Name != expected.Name || result.Odd != expected.Odd { - // This is a failure - t.readExistingCounter.failed++ - t.reportFailure(test.NewFailure("Read existing document '%s' from collection '%s' returned different values: got %v expected %v", key, c.name, result, expected)) - return maskAny(fmt.Errorf("Read returned invalid values")) - } - - if updateRevision { - // Store read document so we have the last revision - c.existingDocs[key] = result - } - t.readExistingCounter.succeeded++ - t.log.Info().Msgf("Reading existing document '%s' from '%s' succeeded", key, c.name) - return nil -} - -// readNonExistingDocument reads a non-existing document. -// The operation is expected to fail. -func (t *simpleTest) readNonExistingDocument(collectionName string, key string) error { - ctx := context.Background() - var result UserDocument - t.log.Info().Msgf("Reading non-existing document '%s' from '%s'...", key, collectionName) - col, err := t.db.Collection(ctx, collectionName) - if err != nil { - return maskAny(err) - } - if _, err := col.ReadDocument(ctx, key, &result); !driver.IsNotFound(err) { - // This is a failure - t.readNonExistingCounter.failed++ - t.reportFailure(test.NewFailure("Reading non-existing document '%s' in collection '%s' did not fail as expected: %v", key, collectionName, err)) - return maskAny(err) - } - t.readNonExistingCounter.succeeded++ - t.log.Info().Msgf("Reading non-existing document '%s' from '%s' succeeded", key, collectionName) - return nil -} diff --git a/tests/duration/simple/simple_rebalance.go b/tests/duration/simple/simple_rebalance.go deleted file mode 100644 index 205ab74b8..000000000 --- a/tests/duration/simple/simple_rebalance.go +++ /dev/null @@ -1,40 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -// rebalanceShards attempts to rebalance shards over the existing servers. -// The operation is expected to succeed. -func (t *simpleTest) rebalanceShards() error { - /*opts := struct{}{} - operationTimeout, retryTimeout := t.OperationTimeout, t.RetryTimeout - t.log.Info().Msgf("Rebalancing shards...") - if _, err := t.client.Post("/_admin/cluster/rebalanceShards", nil, nil, opts, "", nil, []int{202}, []int{400, 403, 503}, operationTimeout, retryTimeout); err != nil { - // This is a failure - t.rebalanceShardsCounter.failed++ - t.reportFailure(test.NewFailure("Failed to rebalance shards: %v", err)) - return maskAny(err) - } - t.rebalanceShardsCounter.succeeded++ - t.log.Info().Msgf("Rebalancing shards succeeded")*/ - return nil -} diff --git a/tests/duration/simple/simple_remove.go b/tests/duration/simple/simple_remove.go deleted file mode 100644 index 2bea13ea2..000000000 --- a/tests/duration/simple/simple_remove.go +++ /dev/null @@ -1,71 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// removeExistingDocument removes an existing document. -// The operation is expected to succeed. -func (t *simpleTest) removeExistingDocument(collectionName string, key string) error { - ctx := context.Background() - col, err := t.db.Collection(ctx, collectionName) - if err != nil { - return maskAny(err) - } - t.log.Info().Msgf("Removing existing document '%s' from '%s'...", key, collectionName) - if _, err := col.RemoveDocument(ctx, key); err != nil { - // This is a failure - t.deleteExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to delete existing document '%s' in collection '%s': %v", key, collectionName, err)) - return maskAny(err) - } - t.deleteExistingCounter.succeeded++ - t.log.Info().Msgf("Removing existing document '%s' from '%s' succeeded", key, collectionName) - return nil -} -
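The test plan above leaves its "wrong revision" steps (cases 4, 7, 10, and 13) unimplemented. A hedged sketch of what such a step could look like, using go-driver's revision precondition support; the helper itself is hypothetical, while `driver.WithRevision` and `driver.IsPreconditionFailed` are real go-driver functions:

```go
package main

import (
	"context"
	"fmt"

	driver "github.com/arangodb/go-driver"
)

// removeWithWrongRevision attempts a delete with an outdated revision and
// expects the server to reject it with a precondition failure (HTTP 412)
// rather than removing the document.
func removeWithWrongRevision(ctx context.Context, col driver.Collection, key, staleRev string) error {
	revCtx := driver.WithRevision(ctx, staleRev)
	if _, err := col.RemoveDocument(revCtx, key); !driver.IsPreconditionFailed(err) {
		return fmt.Errorf("expected precondition failure for stale revision of '%s', got: %v", key, err)
	}
	return nil
}
```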
-// removeNonExistingDocument removes a non-existing document. -// The operation is expected to fail. -func (t *simpleTest) removeNonExistingDocument(collectionName string, key string) error { - ctx := context.Background() - col, err := t.db.Collection(ctx, collectionName) - if err != nil { - return maskAny(err) - } - t.log.Info().Msgf("Removing non-existing document '%s' from '%s'...", key, collectionName) - if _, err := col.RemoveDocument(ctx, key); !driver.IsNotFound(err) { - // This is a failure - t.deleteNonExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to delete non-existing document '%s' in collection '%s': %v", key, collectionName, err)) - return maskAny(err) - } - t.deleteNonExistingCounter.succeeded++ - t.log.Info().Msgf("Removing non-existing document '%s' from '%s' succeeded", key, collectionName) - return nil -} diff --git a/tests/duration/simple/simple_replace.go b/tests/duration/simple/simple_replace.go deleted file mode 100644 index e0c2634b7..000000000 --- a/tests/duration/simple/simple_replace.go +++ /dev/null @@ -1,92 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - "math/rand" - "time" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// replaceExistingDocument replaces an existing document. -// The operation is expected to succeed. -func (t *simpleTest) replaceExistingDocument(c *collection, key string) (string, error) { - ctx := context.Background() - col, err := t.db.Collection(ctx, c.name) - if err != nil { - return "", maskAny(err) - } - newName := fmt.Sprintf("Updated name %s", time.Now()) - t.log.Info().Msgf("Replacing existing document '%s' in '%s' (name -> '%s')...", key, c.name, newName) - newDoc := UserDocument{ - Key: key, - Name: fmt.Sprintf("Replaced named %s", key), - Value: rand.Int(), - Odd: rand.Int()%2 == 0, - } - m, err := col.ReplaceDocument(ctx, key, newDoc) - if err != nil { - // This is a failure - t.replaceExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to replace existing document '%s' in collection '%s': %v", key, c.name, err)) - return "", maskAny(err) - } - // Update internal doc - newDoc.rev = m.Rev - c.existingDocs[key] = newDoc - t.replaceExistingCounter.succeeded++ - t.log.Info().Msgf("Replacing existing document '%s' in '%s' (name -> '%s') succeeded", key, c.name, newName) - return m.Rev, nil -} -
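The file above uses both `ReplaceDocument` and (in simple_update.go below) `UpdateDocument`, and the difference matters: replace swaps the entire stored document, while update merges a patch into it. A hedged sketch contrasting the two, with a hypothetical `note` field that is not part of the original `UserDocument`:

```go
package main

import (
	"context"

	driver "github.com/arangodb/go-driver"
)

// annotated is a hypothetical document type with an extra field.
type annotated struct {
	Name string `json:"name"`
	Note string `json:"note,omitempty"`
}

// updateThenReplace shows the asymmetry: after UpdateDocument with a patch
// that omits `note`, the stored `note` survives; after ReplaceDocument with a
// value that omits it, `note` is gone.
func updateThenReplace(ctx context.Context, col driver.Collection, key string) error {
	// Merge: only `name` changes, `note` keeps its old value.
	if _, err := col.UpdateDocument(ctx, key, map[string]interface{}{"name": "n2"}); err != nil {
		return err
	}
	// Full overwrite: the stored document becomes exactly this value.
	_, err := col.ReplaceDocument(ctx, key, annotated{Name: "n3"})
	return err
}
```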
-// replaceNonExistingDocument replaces a non-existing document. -// The operation is expected to fail. -func (t *simpleTest) replaceNonExistingDocument(collectionName string, key string) error { - ctx := context.Background() - col, err := t.db.Collection(ctx, collectionName) - if err != nil { - return maskAny(err) - } - newName := fmt.Sprintf("Updated non-existing name %s", time.Now()) - t.log.Info().Msgf("Replacing non-existing document '%s' in '%s' (name -> '%s')...", key, collectionName, newName) - newDoc := UserDocument{ - Key: key, - Name: fmt.Sprintf("Replaced named %s", key), - Value: rand.Int(), - Odd: rand.Int()%2 == 0, - } - if _, err := col.ReplaceDocument(ctx, key, newDoc); !driver.IsNotFound(err) { - // This is a failure - t.replaceNonExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to replace non-existing document '%s' in collection '%s': %v", key, collectionName, err)) - return maskAny(err) - } - t.replaceNonExistingCounter.succeeded++ - t.log.Info().Msgf("Replacing non-existing document '%s' in '%s' (name -> '%s') succeeded", key, collectionName, newName) - return nil -} diff --git a/tests/duration/simple/simple_update.go b/tests/duration/simple/simple_update.go deleted file mode 100644 index ca7f2817f..000000000 --- a/tests/duration/simple/simple_update.go +++ /dev/null @@ -1,87 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package simple - -import ( - "context" - "fmt" - "time" - - driver "github.com/arangodb/go-driver" - - "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -// updateExistingDocument updates an existing document. -// The operation is expected to succeed. -func (t *simpleTest) updateExistingDocument(c *collection, key string) (string, error) { - ctx := context.Background() - col, err := t.db.Collection(ctx, c.name) - if err != nil { - return "", maskAny(err) - } - newName := fmt.Sprintf("Updated name %s", time.Now()) - t.log.Info().Msgf("Updating existing document '%s' in '%s' (name -> '%s')...", key, c.name, newName) - delta := map[string]interface{}{ - "name": newName, - } - doc := c.existingDocs[key] - m, err := col.UpdateDocument(ctx, key, delta) - if err != nil { - // This is a failure - t.updateExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to update existing document '%s' in collection '%s': %v", key, c.name, err)) - return "", maskAny(err) - } - // Update internal doc - doc.Name = newName - doc.rev = m.Rev - c.existingDocs[key] = doc - t.updateExistingCounter.succeeded++ - t.log.Info().Msgf("Updating existing document '%s' in '%s' (name -> '%s') succeeded", key, c.name, newName) - return m.Rev, nil -} -
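One more update nuance worth illustrating alongside `updateExistingDocument` above: by default, a `null` in the patch stores an explicit null in the document, but go-driver's `driver.WithKeepNull(ctx, false)` asks the server to drop the attribute instead. The helper below is a hypothetical sketch; `WithKeepNull` is a real go-driver context option:

```go
package main

import (
	"context"

	driver "github.com/arangodb/go-driver"
)

// removeFieldViaUpdate deletes the `name` attribute from a document by
// patching it to null while disabling keepNull for this one call.
func removeFieldViaUpdate(ctx context.Context, col driver.Collection, key string) error {
	patch := map[string]interface{}{"name": nil}
	_, err := col.UpdateDocument(driver.WithKeepNull(ctx, false), key, patch)
	return err
}
```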
-func (t *simpleTest) updateNonExistingDocument(collectionName string, key string) error { - ctx := context.Background() - col, err := t.db.Collection(ctx, collectionName) - if err != nil { - return maskAny(err) - } - newName := fmt.Sprintf("Updated non-existing name %s", time.Now()) - t.log.Info().Msgf("Updating non-existing document '%s' in '%s' (name -> '%s')...", key, collectionName, newName) - delta := map[string]interface{}{ - "name": newName, - } - if _, err := col.UpdateDocument(ctx, key, delta); !driver.IsNotFound(err) { - // This is a failure - t.updateNonExistingCounter.failed++ - t.reportFailure(test.NewFailure("Failed to update non-existing document '%s' in collection '%s': %v", key, collectionName, err)) - return maskAny(err) - } - t.updateNonExistingCounter.succeeded++ - t.log.Info().Msgf("Updating non-existing document '%s' in '%s' (name -> '%s') succeeded", key, collectionName, newName) - return nil -} diff --git a/tests/duration/test/shuffle.go b/tests/duration/test/shuffle.go deleted file mode 100644 index 1451751e9..000000000 --- a/tests/duration/test/shuffle.go +++ /dev/null @@ -1,43 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package test - -import "math/rand" - -// A type, typically a collection, that satisfies shuffle.Interface can be -// shuffled by the routines in this package. -type Interface interface { - // Len is the number of elements in the collection. - Len() int - // Swap swaps the elements with indexes i and j. - Swap(i, j int) -} - -// Shuffle shuffles Data. -func Shuffle(data Interface) { - n := data.Len() - for i := n - 1; i >= 0; i-- { - j := rand.Intn(i + 1) - data.Swap(i, j) - } -} diff --git a/tests/duration/test/test.go b/tests/duration/test/test.go deleted file mode 100644 index 1206cfd6c..000000000 --- a/tests/duration/test/test.go +++ /dev/null @@ -1,66 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package test
-
-import (
-	"fmt"
-
-	driver "github.com/arangodb/go-driver"
-)
-
-type TestScript interface {
-	Start(client driver.Client, listener TestListener) error
-	Stop() error
-	Pause() error
-	Resume() error
-	Status() TestStatus
-}
-
-type TestListener interface {
-	ReportFailure(Failure)
-}
-
-type Counter struct {
-	Name      string
-	Succeeded int
-	Failed    int
-}
-
-type TestStatus struct {
-	Active   bool
-	Pausing  bool
-	Failures int
-	Actions  int
-	Counters []Counter
-	Messages []string
-}
-
-type Failure struct {
-	Message string
-}
-
-func NewFailure(msg string, args ...interface{}) Failure {
-	return Failure{
-		Message: fmt.Sprintf(msg, args...),
-	}
-}
diff --git a/tests/duration/test_listener.go b/tests/duration/test_listener.go
deleted file mode 100644
index e330d4d5e..000000000
--- a/tests/duration/test_listener.go
+++ /dev/null
@@ -1,88 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package main
-
-import (
-	"sync"
-	"time"
-
-	"github.com/rs/zerolog"
-
-	"github.com/arangodb/kube-arangodb/tests/duration/test"
-)
-
-const (
-	recentFailureTimeout        = time.Hour       // Disregard failures older than this timeout
-	requiredRecentFailureSpread = time.Minute * 5 // How far apart the first and last recent failure must be
-	requiredRecentFailures      = 30              // At least this many recent failures are needed to fail the test
-)
-
-type testListener struct {
-	mutex          sync.Mutex
-	Log            zerolog.Logger
-	FailedCallback func()
-	recentFailures []time.Time
-	failed         bool
-}
-
-var _ test.TestListener = &testListener{}
-
-// ReportFailure logs the given failure and keeps track of recent failure timestamps.
-func (l *testListener) ReportFailure(f test.Failure) {
-	l.Log.Error().Msg(f.Message)
-
-	// Remove all old recent failures
-	l.mutex.Lock()
-	defer l.mutex.Unlock()
-	for {
-		if len(l.recentFailures) == 0 {
-			break
-		}
-		isOld := l.recentFailures[0].Add(recentFailureTimeout).Before(time.Now())
-		if isOld {
-			// Remove first entry
-			l.recentFailures = l.recentFailures[1:]
-		} else {
-			// First failure is not old, keep the list as is
-			break
-		}
-	}
-	l.recentFailures = append(l.recentFailures, time.Now())
-
-	// Detect failed state
-	if len(l.recentFailures) > requiredRecentFailures {
-		spread := l.recentFailures[len(l.recentFailures)-1].Sub(l.recentFailures[0])
-		if spread > requiredRecentFailureSpread {
-			l.failed = true
-			if l.FailedCallback != nil {
-				l.FailedCallback()
-			}
-		}
-	}
-}
-
-// IsFailed returns true when the number of recent failures
-// has gone above the set maximum, false otherwise.
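-// More precisely: the listener enters the failed state once more than
-// requiredRecentFailures failures were recorded within recentFailureTimeout,
-// with the first and last of them more than requiredRecentFailureSpread apart.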
-func (l *testListener) IsFailed() bool { - return l.failed -} diff --git a/tests/duration/test_loop.go b/tests/duration/test_loop.go deleted file mode 100644 index d23048048..000000000 --- a/tests/duration/test_loop.go +++ /dev/null @@ -1,113 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package main - -import ( - "context" - "os" - "time" - - driver "github.com/arangodb/go-driver" - "github.com/rs/zerolog" - - "github.com/arangodb/kube-arangodb/tests/duration/simple" - t "github.com/arangodb/kube-arangodb/tests/duration/test" -) - -var ( - testPeriod = time.Minute * 2 -) - -// runTestLoop keeps running tests until the given context is canceled. -func runTestLoop(ctx context.Context, client driver.Client, duration time.Duration) { - log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger() - endTime := time.Now().Add(duration) - reportDir := "." - tests := []t.TestScript{} - tests = append(tests, simple.NewSimpleTest(log, reportDir, simple.SimpleConfig{ - MaxDocuments: 500, - MaxCollections: 50, - })) - - log.Info().Msg("Starting tests") - listener := &testListener{ - Log: log, - FailedCallback: func() { - log.Fatal().Msg("Too many recent failures. Aborting test") - }, - } - for _, tst := range tests { - if err := tst.Start(client, listener); err != nil { - log.Fatal().Err(err).Msg("Failed to start test") - } - } - for { - if err := ctx.Err(); err != nil { - return - } - - // Check end time - if time.Now().After(endTime) { - log.Info().Msgf("Test has run for %s. We're done", duration) - return - } - - // Run tests - log.Info().Msg("Running tests...") - select { - case <-time.After(testPeriod): - // Continue - case <-ctx.Done(): - return - } - - // Pause tests - log.Info().Msg("Pause tests") - for _, tst := range tests { - if err := tst.Pause(); err != nil { - log.Fatal().Err(err).Msg("Failed to pause test") - } - } - - // Wait for tests to really pause - log.Info().Msg("Waiting for tests to reach pausing state") - for _, tst := range tests { - for !tst.Status().Pausing { - select { - case <-time.After(time.Second): - // Continue - case <-ctx.Done(): - return - } - } - } - - // Resume tests - log.Info().Msg("Resuming tests") - for _, tst := range tests { - if err := tst.Resume(); err != nil { - log.Fatal().Err(err).Msg("Failed to resume test") - } - } - } -} diff --git a/tests/environments_test.go b/tests/environments_test.go deleted file mode 100644 index 129f75014..000000000 --- a/tests/environments_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Jan Christoph Uhde
-//
-package tests
-
-import (
-	"context"
-	"fmt"
-	"strings"
-	"testing"
-
-	"github.com/dchest/uniuri"
-	"github.com/stretchr/testify/assert"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/util"
-)
-
-// TestEnvironmentProduction tests whether a deployment comes up in the production environment.
-// LONG: The test ensures that the deployment fails if there are
-// fewer nodes available than servers required.
-func TestEnvironmentProduction(t *testing.T) {
-	longOrSkip(t)
-
-	mode := api.DeploymentModeCluster
-	engine := api.StorageEngineRocksDB
-
-	ns := getNamespace(t)
-	kubecli := mustNewKubeClient(t)
-
-	nodeList, err := kubecli.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
-	if err != nil {
-		t.Fatalf("Unable to retrieve node list: %v", err)
-	}
-	numNodes := len(nodeList.Items)
-
-	c := kubeArangoClient.MustNewClient()
-	depl := newDeployment(strings.Replace(fmt.Sprintf("tprod-%s-%s-%s", mode[:2], engine[:2], uniuri.NewLen(4)), ".", "", -1))
-	depl.Spec.Mode = api.NewMode(mode)
-	depl.Spec.StorageEngine = api.NewStorageEngine(engine)
-	depl.Spec.TLS = api.TLSSpec{}
-	depl.Spec.Environment = api.NewEnvironment(api.EnvironmentProduction)
-	depl.Spec.DBServers.Count = util.NewInt(numNodes + 1)
-	depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-	// This test fails to validate the spec if no image is set explicitly, because an explicit image is required in production mode
-	if depl.Spec.Image == nil {
-		depl.Spec.Image = util.NewString("arangodb/arangodb:latest")
-	}
-	assert.NoError(t, depl.Spec.Validate())
-
-	dbserverCount := depl.Spec.DBServers.GetCount()
-	if dbserverCount < 3 {
-		t.Skipf("Not enough DBServers to run this test: server count %d", dbserverCount)
-	}
-
-	// Create deployment
-	if _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}); err != nil {
-		// REVIEW - should the test already fail here
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-	_, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-	assert.Error(t, err, fmt.Sprintf("Deployment is up and running when it should not! There are not enough nodes (%d) for all DBServers (%d) in production mode.", numNodes, dbserverCount))
-
-	// Cleanup
-	removeDeployment(c, depl.GetName(), ns)
-}
diff --git a/tests/immutable_test.go b/tests/immutable_test.go
deleted file mode 100644
index 47eed1524..000000000
--- a/tests/immutable_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Kaveh Vahedipour -// Author Ewout Prangsma -// - -package tests - -import ( - "context" - "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" - "time" - - "github.com/dchest/uniuri" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" -) - -// TestImmutableFields tests that several immutable fields in the deployment -// spec are reverted to their original value. -func TestImmutableFields(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - revertTimeout := time.Second * 30 - - // Prepare deployment config - depl := newDeployment("test-ise-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl.Spec.SetDefaults(depl.GetName()) - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server to be completely ready - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not up in time: %v", err) - } - - // Try to reset storageEngine =============================================== - if _, err := updateDeployment(c, depl.GetName(), ns, - func(spec *api.DeploymentSpec) { - spec.StorageEngine = api.NewStorageEngine(api.StorageEngineMMFiles) - }); err != nil { - t.Fatalf("Failed to update the StorageEngine setting: %v", err) - } - - // Wait for StorageEngine parameter to be back to RocksDB - if _, err := waitUntilDeployment(c, depl.GetName(), ns, - func(depl *api.ArangoDeployment) error { - if api.StorageEngineOrDefault(depl.Spec.StorageEngine) == api.StorageEngineRocksDB { - return nil - } - return fmt.Errorf("StorageEngine not back to %s", api.StorageEngineRocksDB) - }, revertTimeout); err != nil { - t.Errorf("StorageEngine parameter is mutable: %v", err) - } - - /* - Secrets are a special case that we'll deal with later - // Try to reset the RocksDB encryption key ================================== - if _, err := updateDeployment(c, depl.GetName(), ns, - func(spec *api.DeploymentSpec) { - spec.RocksDB.Encryption.KeySecretName = util.NewString("foobarbaz") - }); err != nil { - t.Fatalf("Failed to update the RocksDB encryption key: %v", err) - } - - // Wait for deployment mode to be set back to cluster - if _, err := waitUntilDeployment(c, depl.GetName(), ns, - func(depl *api.ArangoDeployment) error { - if util.StringOrDefault(depl.Spec.RocksDB.Encryption.KeySecretName) == "test.encryption.keySecretName" { - return nil - } - return fmt.Errorf("RocksDB 
encryption key not back to %s", "test.encryption.keySecretName")
-			}, revertTimeout); err != nil {
-			t.Errorf("RocksDB encryption key is mutable: %v", err)
-		}
-	*/
-
-	// Try to reset the deployment type ==========================================
-	if _, err := updateDeployment(c, depl.GetName(), ns,
-		func(spec *api.DeploymentSpec) {
-			spec.Mode = api.NewMode(api.DeploymentModeCluster)
-		}); err != nil {
-		t.Fatalf("Failed to update the deployment mode: %v", err)
-	}
-
-	// Wait for deployment mode to be set back to single
-	if _, err := waitUntilDeployment(c, depl.GetName(), ns,
-		func(depl *api.ArangoDeployment) error {
-			expected := api.DeploymentModeSingle
-			if api.ModeOrDefault(depl.Spec.Mode) == expected {
-				return nil
-			}
-			return fmt.Errorf("Deployment mode not back to %s", expected)
-		}, revertTimeout); err != nil {
-		t.Errorf("Deployment mode is mutable: %v", err)
-	}
-
-	// Cleanup
-	removeDeployment(c, depl.GetName(), ns)
-}
diff --git a/tests/load_balancer_source_ranges_test.go b/tests/load_balancer_source_ranges_test.go
deleted file mode 100644
index 7ef11346b..000000000
--- a/tests/load_balancer_source_ranges_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-// Author Max Neunhoeffer
-//
-
-package tests
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"github.com/dchest/uniuri"
-
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/util"
-	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// TestLoadBalancingSourceRanges checks that LoadBalancerSourceRanges from the
-// deployment spec are applied to the external access service and can be updated.
-func TestLoadBalancingSourceRanges(t *testing.T) {
-	longOrSkip(t)
-
-	c := client.MustNewClient()
-	kubecli := mustNewKubeClient(t)
-	ns := getNamespace(t)
-
-	// Prepare deployment config
-	namePrefix := "test-lb-src-ranges-"
-	depl := newDeployment(namePrefix + uniuri.NewLen(4))
-	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-	depl.Spec.Image = util.NewString("arangodb/arangodb:latest")
-	depl.Spec.ExternalAccess.Type = api.NewExternalAccessType(api.ExternalAccessTypeLoadBalancer)
-	depl.Spec.ExternalAccess.LoadBalancerSourceRanges = append(depl.Spec.ExternalAccess.LoadBalancerSourceRanges, "1.2.3.0/24", "0.0.0.0/0")
-
-	// Create deployment
-	_, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-	if err != nil {
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	// Prepare cleanup
-	defer removeDeployment(c, depl.GetName(), ns)
-
-	// Wait for deployment to be ready
-	apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-	if err != nil {
-		t.Fatalf("Deployment not running in time: %v", err)
-	}
-
-	// Create a database client
-	ctx := context.Background()
-	clOpts := &DatabaseClientOptions{
-		UseVST:       false,
-		ShortTimeout: true,
-	}
-	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, clOpts)
-
-	// Wait for cluster to be available
-	if err := waitUntilVersionUp(client, nil); err != nil {
-		t.Fatalf("Cluster not running returning version in time: %v", err)
-	}
-
-	// Now let's use the k8s api to check if the source ranges are present in
-	// the external service spec:
-	svcs := kubecli.CoreV1().Services(ns)
-	eaServiceName := k8sutil.CreateDatabaseExternalAccessServiceName(depl.GetName())
-	// Just in case, give the service some time to appear. It should usually
-	// be there already once the deployment is ready, but we have had
-	// unstable tests in the past.
-	counter := 0
-	var foundExternalIP string
-	for {
-		if svc, err := svcs.Get(context.Background(), eaServiceName, metav1.GetOptions{}); err == nil {
-			spec := svc.Spec
-			ranges := spec.LoadBalancerSourceRanges
-			if len(ranges) != 2 {
-				t.Errorf("LoadBalancerSourceRanges does not have length 2: %v", ranges)
-			} else {
-				if ranges[0] != "1.2.3.0/24" {
-					t.Errorf("Expecting first LoadBalancerSourceRange to be \"1.2.3.0/24\", but ranges are: %v", ranges)
-				}
-				if ranges[1] != "0.0.0.0/0" {
-					t.Errorf("Expecting second LoadBalancerSourceRange to be \"0.0.0.0/0\", but ranges are: %v", ranges)
-				}
-			}
-			foundExternalIP = spec.LoadBalancerIP
-			break
-		}
-		t.Logf("Service %s cannot be found, waiting for some time...", eaServiceName)
-		time.Sleep(time.Second)
-		counter++
-		if counter >= 60 {
-			t.Fatalf("Could not find service %s within 60 seconds, giving up.", eaServiceName)
-		}
-	}
-
-	// Now change the deployment spec to use different ranges:
-	_, err = updateDeployment(c, depl.GetName(), ns,
-		func(spec *api.DeploymentSpec) {
-			spec.ExternalAccess.LoadBalancerSourceRanges = []string{"4.5.0.0/16"}
-		})
-	if err != nil {
-		t.Fatalf("Failed to update the deployment: %v", err)
-	}
-
-	// And check again:
-	counter = 0
-	for {
-		time.Sleep(time.Second)
-		if svc, err := svcs.Get(context.Background(), eaServiceName, metav1.GetOptions{}); err == nil {
-			spec := svc.Spec
-			ranges := spec.LoadBalancerSourceRanges
-			good := true
-			if len(ranges) != 1 {
-				t.Logf("LoadBalancerSourceRanges does not have length 1: %v, waiting some more...", ranges)
-				good = false
-			} else {
-				if ranges[0] != "4.5.0.0/16" {
-					t.Logf("Expecting only LoadBalancerSourceRange to be \"4.5.0.0/16\", but ranges are: %v, waiting some more...", ranges)
-					good = false
-				} else {
-					if spec.LoadBalancerIP != foundExternalIP {
-						t.Errorf("Oops, the external IP of the external access service has changed: previously: %s, now: %s", foundExternalIP, spec.LoadBalancerIP)
-					}
-				}
-			}
-			if good {
-				break
-			}
-		}
-		t.Logf("Service %s not yet as expected, waiting for some more time...", eaServiceName)
-		counter++
-		if counter >= 60 {
-			t.Fatalf("Could not find changed service %s within 60 seconds, giving up.", eaServiceName)
-		}
-	}
-	t.Logf("Success! Service %s was changed correctly.", eaServiceName)
-}
diff --git a/tests/load_balancer_test.go b/tests/load_balancer_test.go
deleted file mode 100644
index 679e564cc..000000000
--- a/tests/load_balancer_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-	"context"
-	"reflect"
-	"testing"
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/dchest/uniuri"
-
-	driver "github.com/arangodb/go-driver"
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-)
-
-func TestLoadBalancingCursorVST(t *testing.T) {
-	longOrSkip(t)
-	// run with VST
-	loadBalancingCursorSubtest(t, true)
-}
-
-func TestLoadBalancingCursorHTTP(t *testing.T) {
-	longOrSkip(t)
-	// run with HTTP
-	loadBalancingCursorSubtest(t, false)
-}
-
-func wasForwarded(r driver.Response) bool {
-	h := r.Header("x-arango-request-forwarded-to")
-	return h != ""
-}
-
-// tests cursor forwarding with load-balanced conn.
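-// The subtest relies on driver.WithResponse to capture each response and on
-// the x-arango-request-forwarded-to header (see wasForwarded) to detect
-// whether a request was forwarded to another coordinator behind the load
-// balancer.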
-func loadBalancingCursorSubtest(t *testing.T, useVst bool) { - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - namePrefix := "test-lb-" - if useVst { - namePrefix += "vst-" - } else { - namePrefix += "http-" - } - depl := newDeployment(namePrefix + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - clOpts := &DatabaseClientOptions{ - UseVST: useVst, - ShortTimeout: true, - } - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, clOpts) - - // Wait for cluster to be available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster not running returning version in time: %v", err) - } - - // Create data set - collectionData := map[string][]interface{}{ - "books": []interface{}{ - Book{Title: "Book 01"}, - Book{Title: "Book 02"}, - Book{Title: "Book 03"}, - Book{Title: "Book 04"}, - Book{Title: "Book 05"}, - Book{Title: "Book 06"}, - Book{Title: "Book 07"}, - Book{Title: "Book 08"}, - Book{Title: "Book 09"}, - Book{Title: "Book 10"}, - Book{Title: "Book 11"}, - Book{Title: "Book 12"}, - Book{Title: "Book 13"}, - Book{Title: "Book 14"}, - Book{Title: "Book 15"}, - Book{Title: "Book 16"}, - Book{Title: "Book 17"}, - Book{Title: "Book 18"}, - Book{Title: "Book 19"}, - Book{Title: "Book 20"}, - }, - "users": []interface{}{ - UserDoc{Name: "John", Age: 13}, - UserDoc{Name: "Jake", Age: 25}, - UserDoc{Name: "Clair", Age: 12}, - UserDoc{Name: "Johnny", Age: 42}, - UserDoc{Name: "Blair", Age: 67}, - UserDoc{Name: "Zz", Age: 12}, - }, - } - - db := ensureDatabase(ctx, client, "lb_cursor_test", nil, t) - for colName, colDocs := range collectionData { - col := ensureCollection(ctx, db, colName, nil, t) - if _, _, err := col.CreateDocuments(ctx, colDocs); err != nil { - t.Fatalf("Expected success, got %s", err) - } - } - - // Setup tests - tests := []queryTest{ - queryTest{ - Query: "FOR d IN books SORT d.Title RETURN d", - ExpectSuccess: true, - ExpectedDocuments: collectionData["books"], - DocumentType: reflect.TypeOf(Book{}), - }, - } - - var r driver.Response - // Setup context - ctx = driver.WithResponse(driver.WithQueryBatchSize(context.Background(), 1), &r) - - // keep track of whether at least one request was forwarded internally to the - // correct coordinator behind the load balancer - someRequestsForwarded := false - someRequestsNotForwarded := false - - // Run tests for every context alternative - for i, test := range tests { - cursor, err := db.Query(ctx, test.Query, test.BindVars) - if err == nil { - // Close upon exit of the function - defer cursor.Close() - } - if test.ExpectSuccess { - if err != nil { - t.Errorf("Expected success in query %d (%s), got '%s'", i, test.Query, err) - continue - } - if count := cursor.Count(); count != 0 { - t.Errorf("Expected count of 0, got %d in query %d (%s)", count, i, test.Query) - } - var result []interface{} - for { - hasMore := cursor.HasMore() - doc := reflect.New(test.DocumentType) - if _, 
err := cursor.ReadDocument(ctx, doc.Interface()); driver.IsNoMoreDocuments(err) {
-					if hasMore {
-						t.Error("HasMore returned true, but ReadDocument returns an IsNoMoreDocuments error")
-					}
-					break
-				} else if err != nil {
-					t.Errorf("Failed to read result document %d: %s", len(result), err)
-				}
-				if !hasMore {
-					t.Error("HasMore returned false, but ReadDocument returns a document")
-				}
-				result = append(result, doc.Elem().Interface())
-				if wasForwarded(r) {
-					someRequestsForwarded = true
-				} else {
-					someRequestsNotForwarded = true
-				}
-				time.Sleep(200 * time.Millisecond)
-			}
-			if len(result) != len(test.ExpectedDocuments) {
-				t.Errorf("Expected %d documents, got %d in query %d (%s)", len(test.ExpectedDocuments), len(result), i, test.Query)
-			} else {
-				for resultIdx, resultDoc := range result {
-					if !reflect.DeepEqual(resultDoc, test.ExpectedDocuments[resultIdx]) {
-						t.Errorf("Unexpected document in query %d (%s) at index %d: got %+v, expected %+v", i, test.Query, resultIdx, resultDoc, test.ExpectedDocuments[resultIdx])
-					}
-				}
-			}
-			// Close anyway (this tests calling Close more than once)
-			if err := cursor.Close(); err != nil {
-				t.Errorf("Expected success in Close of cursor from query %d (%s), got '%s'", i, test.Query, err)
-			}
-		} else {
-			if err == nil {
-				t.Errorf("Expected error in query %d (%s), got none", i, test.Query)
-				continue
-			}
-		}
-	}
-
-	if !someRequestsForwarded {
-		t.Error("Did not detect any request being forwarded behind load balancer!")
-	}
-	if !someRequestsNotForwarded {
-		t.Error("Did not detect any request NOT being forwarded behind load balancer!")
-	}
-}
diff --git a/tests/member_resilience_test.go b/tests/member_resilience_test.go
deleted file mode 100644
index b205f1d5f..000000000
--- a/tests/member_resilience_test.go
+++ /dev/null
@@ -1,333 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/dchest/uniuri"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	driver "github.com/arangodb/go-driver"
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/util/retry"
-)
-
-// TestMemberResilienceAgents creates a cluster and removes a
-// specific agent pod 5 times. Each time it waits for a new pod to arrive.
-// After 5 times, the member should be replaced by another member with the same ID.
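-// Agents keep their ID when they are replaced, so replacement is detected by
-// comparing the member's CreatedAt timestamp rather than its ID.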
-func TestMemberResilienceAgents(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-member-res-agnt-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - - // Fetch latest status so we know all member details - apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get deployment: %v", err) - } - - // Pick an agent to be deleted 5 times - targetAgent := apiObject.Status.Members.Agents[0] - for i := 0; i < 5; i++ { - // Get current pod so we can compare UID later - originalPod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), targetAgent.PodName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get pod %s: %v", targetAgent.PodName, err) - } - if err := kubecli.CoreV1().Pods(ns).Delete(context.Background(), targetAgent.PodName, metav1.DeleteOptions{}); err != nil { - t.Fatalf("Failed to delete pod %s: %v", targetAgent.PodName, err) - } - if i < 4 { - // Wait for pod to return with different UID - op := func() error { - pod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), targetAgent.PodName, metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - if pod.GetUID() == originalPod.GetUID() { - return fmt.Errorf("Still original pod") - } - return nil - } - if err := retry.Retry(op, time.Minute); err != nil { - t.Fatalf("Pod did not restart: %v", err) - } - } else { - // Wait for member to be replaced - op := func() error { - updatedObject, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - m, _, found := updatedObject.Status.Members.ElementByID(targetAgent.ID) - if !found { - return maskAny(fmt.Errorf("Member %s not found", targetAgent.ID)) - } - if m.CreatedAt.Equal(&targetAgent.CreatedAt) { - return maskAny(fmt.Errorf("Member %s still not replaced", targetAgent.ID)) - } - return nil - } - if err := retry.Retry(op, time.Minute); err != nil { - t.Fatalf("Member failure did not succeed: %v", err) - } - } - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} - -// TestMemberResilienceCoordinators creates a cluster 
and removes a -// specific coordinator pod 5 times. Each time it waits for a new pod to arrive. -// After 5 times, the member should be replaced by another member. -func TestMemberResilienceCoordinators(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-member-res-crdn-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - - // Fetch latest status so we know all member details - apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get deployment: %v", err) - } - - // Pick a coordinator to be deleted 5 times - targetCoordinator := apiObject.Status.Members.Coordinators[0] - for i := 0; i < 5; i++ { - // Get current pod so we can compare UID later - originalPod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), targetCoordinator.PodName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get pod %s: %v", targetCoordinator.PodName, err) - } - if err := kubecli.CoreV1().Pods(ns).Delete(context.Background(), targetCoordinator.PodName, metav1.DeleteOptions{}); err != nil { - t.Fatalf("Failed to delete pod %s: %v", targetCoordinator.PodName, err) - } - if i < 4 { - // Wait for pod to return with different UID - op := func() error { - pod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), targetCoordinator.PodName, metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - if pod.GetUID() == originalPod.GetUID() { - return fmt.Errorf("Still original pod") - } - return nil - } - if err := retry.Retry(op, time.Minute); err != nil { - t.Fatalf("Pod did not restart: %v", err) - } - } else { - // Wait for member to be replaced - op := func() error { - updatedObject, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - if updatedObject.Status.Members.ContainsID(targetCoordinator.ID) { - return maskAny(fmt.Errorf("Member %s still not replaced", targetCoordinator.ID)) - } - return nil - } - if err := retry.Retry(op, time.Minute); err != nil { - t.Fatalf("Member failure did not succeed: %v", err) - } - } - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - } - - // Cleanup - removeDeployment(c, 
depl.GetName(), ns) -} - -// TestMemberResilienceDBServers creates a cluster and removes a -// specific dbserver pod 5 times. Each time it waits for a new pod to arrive. -// After 5 times, the member should be replaced by another member. -func TestMemberResilienceDBServers(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-member-res-prmr-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - - // Fetch latest status so we know all member details - apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get deployment: %v", err) - } - - // Pick a dbserver to be deleted 5 times - targetServer := apiObject.Status.Members.DBServers[0] - for i := 0; i < 5; i++ { - // Get current pod so we can compare UID later - originalPod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), targetServer.PodName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get pod %s: %v", targetServer.PodName, err) - } - if err := kubecli.CoreV1().Pods(ns).Delete(context.Background(), targetServer.PodName, metav1.DeleteOptions{}); err != nil { - t.Fatalf("Failed to delete pod %s: %v", targetServer.PodName, err) - } - if i < 4 { - // Wait for pod to return with different UID - op := func() error { - pod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), targetServer.PodName, metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - if pod.GetUID() == originalPod.GetUID() { - return fmt.Errorf("Still original pod") - } - return nil - } - if err := retry.Retry(op, time.Minute*2); err != nil { - t.Fatalf("Pod %d did not restart: %v", i, err) - } - } else { - // Wait for member to be replaced - op := func() error { - updatedObject, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - return maskAny(err) - } - if updatedObject.Status.Members.ContainsID(targetServer.ID) { - return maskAny(fmt.Errorf("Member %s still not replaced", targetServer.ID)) - } - return nil - } - if err := retry.Retry(op, time.Minute*2); err != nil { - t.Fatalf("Member failure did not succeed: %v", err) - } - } - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - } - 
- // Cleanup - removeDeployment(c, depl.GetName(), ns) -} diff --git a/tests/metrics_test.go b/tests/metrics_test.go deleted file mode 100644 index 630778049..000000000 --- a/tests/metrics_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Max Neunhoeffer -// -package tests - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/stretchr/testify/require" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/dchest/uniuri" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" -) - -func TestAddingMetrics(t *testing.T) { - longOrSkip(t) - - ns := getNamespace(t) - kubecli := mustNewKubeClient(t) - c := kubeArangoClient.MustNewClient() - - depl := newDeployment(fmt.Sprintf("%s-%s", "arangodb-metrics-test", uniuri.NewLen(4))) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.StorageEngine = api.NewStorageEngine(api.StorageEngineRocksDB) - depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - require.NoErrorf(t, err, "Create deployment failed") - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - deployment, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - require.NoErrorf(t, err, "Deployment not running in time") - - // Create a database client - ctx := context.Background() - DBClient := mustNewArangodDatabaseClient(ctx, kubecli, deployment, t, nil) - err = waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, "") - require.NoErrorf(t, err, "Deployment not healthy in time") - - // Try to switch on metrics: - expectedResourceRequirement := corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - }, - } - - deployment, err = updateDeployment(c, depl.GetName(), ns, func(depl *api.DeploymentSpec) { - depl.Metrics = api.MetricsSpec{ - Enabled: util.NewBool(true), - Image: util.NewString("arangodb/arangodb-exporter:0.1.6"), - Resources: expectedResourceRequirement, - } - }) - require.NoErrorf(t, err, "Failed to add metrics") - t.Log("Updated deployment by adding metrics") - - var resourcesRequirementsExporterCheck api.ServerGroupFunc = func(group api.ServerGroup, spec api.ServerGroupSpec, - status *api.MemberStatusList) error { - - if !group.IsExportMetrics() { - return nil - } - for _, m := range *status { - pod, err := 
kubecli.CoreV1().Pods(ns).Get(context.Background(), m.PodName, metav1.GetOptions{}) - if err != nil { - return err - } - exporter, found := k8sutil.GetContainerByName(pod, k8sutil.ExporterContainerName) - if !found { - return fmt.Errorf("expected exporter to be enabled") - } - - if k8sutil.IsResourceRequirementsChanged(expectedResourceRequirement, exporter.Resources) { - return fmt.Errorf("resources have not been changed: expected %v, actual %v", - expectedResourceRequirement, exporter.Resources) - } - } - return nil - } - - _, err = waitUntilDeploymentMembers(c, deployment.GetName(), ns, resourcesRequirementsExporterCheck, 7*time.Minute) - require.NoError(t, err) - - expectedResourceRequirement.Requests[v1.ResourceCPU] = resource.MustParse("110m") - deployment, err = updateDeployment(c, depl.GetName(), ns, func(depl *api.DeploymentSpec) { - depl.Metrics.Resources = expectedResourceRequirement - }) - require.NoErrorf(t, err, "failed to change resource requirements for metrics") - t.Log("Updated deployment by changing metrics") - _, err = waitUntilDeploymentMembers(c, deployment.GetName(), ns, resourcesRequirementsExporterCheck, 7*time.Minute) - require.NoError(t, err) - - err = waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, "") - require.NoErrorf(t, err, "Deployment not healthy in time") - t.Log("Deployment healthy") - - _, err = waitUntilService(kubecli, depl.GetName()+"-exporter", ns, - func(service *corev1.Service) error { - return nil - }, time.Second*30) - require.NoErrorf(t, err, "Exporter service did not show up in time") - t.Log("Found exporter service") - - _, err = waitUntilEndpoints(kubecli, depl.GetName()+"-exporter", ns, - func(endpoints *corev1.Endpoints) error { - count := 0 - for _, subset := range endpoints.Subsets { - count += len(subset.Addresses) - } - t.Logf("Found %d endpoints in the Endpoints resource", count) - if count < 6 { - return errors.New("did not find enough endpoints in Endpoints resource") - } - return nil - }, time.Second*360) // needs a full rotation with extra containers - require.NoErrorf(t, err, "Exporter endpoints did not show up in time") - t.Log("Found exporter endpoints") - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} diff --git a/tests/operator_upgrade_test.go b/tests/operator_upgrade_test.go deleted file mode 100644 index a34610316..000000000 --- a/tests/operator_upgrade_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package tests - -import ( - "context" - "fmt" - "testing" - "time" - - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/fields" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/arangodb/kube-arangodb/pkg/util/retry" - "github.com/dchest/uniuri" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" -) - -const ( - operatorTestDeploymentName string = "arango-deployment-operator" - oldOperatorTestImage string = "arangodb/kube-arangodb:0.3.16" -) - -func TestOperatorUpgradeFrom038(t *testing.T) { - ns := getNamespace(t) - kubecli := mustNewKubeClient(t) - c := kubeArangoClient.MustNewClient() - - if err := waitForArangoDBPodsGone(ns, kubecli); err != nil { - t.Fatalf("Remaining arangodb pods did not vanish, can not start test: %v", err) - } - - currentimage, err := updateOperatorImage(ns, kubecli, oldOperatorTestImage) - if err != nil { - t.Fatalf("Could not replace 
operator with old image: %v", err)
-	}
-	defer updateOperatorImage(ns, kubecli, currentimage)
-
-	if err := waitForOperatorImage(ns, kubecli, oldOperatorTestImage); err != nil {
-		t.Fatalf("Old Operator not ready in time: %v", err)
-	}
-
-	depl := newDeployment(fmt.Sprintf("opup-%s", uniuri.NewLen(4)))
-	depl.Spec.TLS = api.TLSSpec{}         // should auto-generate cert
-	depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-	// Create deployment
-	if _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}); err != nil {
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	defer removeDeployment(c, depl.GetName(), ns)
-
-	// Wait for deployment to be ready
-	_, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-	if err != nil {
-		t.Fatalf("Deployment not running in time: %v", err)
-	}
-
-	podsWatcher, err := kubecli.CoreV1().Pods(ns).Watch(context.Background(), metav1.ListOptions{
-		LabelSelector: fields.OneTermEqualSelector("app", "arangodb").String(),
-	})
-	if err != nil {
-		t.Fatalf("Failed to watch pods: %v", err)
-	}
-	defer podsWatcher.Stop()
-
-	errorChannel := make(chan error)
-	go func() {
-		var addedPods []string
-		for {
-			select {
-			case ev, ok := <-podsWatcher.ResultChan():
-				if !ok {
-					return // Abort
-				}
-				if pod, ok := ev.Object.(*v1.Pod); ok {
-					if k8sutil.IsArangoDBImageIDAndVersionPod(pod) {
-						continue
-					}
-
-					switch ev.Type {
-					case watch.Modified:
-						if !k8sutil.IsPodReady(pod) {
-							errorChannel <- fmt.Errorf("Pod no longer ready: %s", pod.GetName())
-						}
-					case watch.Deleted:
-						errorChannel <- fmt.Errorf("Pod was deleted: %s", pod.GetName())
-					case watch.Added:
-						if len(addedPods) >= 9 {
-							errorChannel <- fmt.Errorf("New pod was created: %s", pod.GetName())
-						}
-						addedPods = append(addedPods, pod.GetName())
-					}
-				}
-			}
-		}
-	}()
-
-	if _, err := updateOperatorImage(ns, kubecli, currentimage); err != nil {
-		t.Fatalf("Failed to restore operator image: %v", err)
-	}
-
-	if err := waitForOperatorImage(ns, kubecli, currentimage); err != nil {
-		t.Fatalf("New operator not ready in time: %v", err)
-	}
-
-	select {
-	case <-time.After(1 * time.Minute):
-		break // cool
-	case err := <-errorChannel:
-		// not cool
-		t.Errorf("Deployment had error: %v", err)
-	}
-}
-
-func updateOperatorImage(ns string, kube kubernetes.Interface, newImage string) (string, error) {
-	for {
-		depl, err := kube.AppsV1().Deployments(ns).Get(context.Background(), operatorTestDeploymentName, metav1.GetOptions{})
-		if err != nil {
-			return "", err
-		}
-		old, err := getOperatorImage(depl)
-		if err != nil {
-			return "", err
-		}
-		setOperatorImage(depl, newImage)
-		if _, err := kube.AppsV1().Deployments(ns).Update(context.Background(), depl, metav1.UpdateOptions{}); k8sutil.IsConflict(err) {
-			continue
-		} else if err != nil {
-			return "", err
-		}
-		return old, nil
-	}
-}
-
-func getOperatorImage(depl *appsv1.Deployment) (string, error) {
-	for _, c := range depl.Spec.Template.Spec.Containers {
-		if c.Name == "operator" {
-			return c.Image, nil
-		}
-	}
-
-	return "", fmt.Errorf("Operator container not found")
-}
-
-func setOperatorImage(depl *appsv1.Deployment, image string) {
-	for i := range depl.Spec.Template.Spec.Containers {
-		c := &depl.Spec.Template.Spec.Containers[i]
-		if c.Name == "operator" {
-			c.Image = image
-		}
-	}
-}
-
-func waitForArangoDBPodsGone(ns string, kube kubernetes.Interface) error {
-	return retry.Retry(func() error {
-		pods, err := kube.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
-			LabelSelector:
fields.OneTermEqualSelector("app", "arangodb").String(),
-		})
-		if err != nil {
-			return err
-		}
-		if len(pods.Items) > 0 {
-			return fmt.Errorf("found %d remaining arangodb pods", len(pods.Items))
-		}
-		return nil
-	}, deploymentReadyTimeout)
-}
-
-func waitForOperatorImage(ns string, kube kubernetes.Interface, image string) error {
-	return retry.Retry(func() error {
-		pods, err := kube.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
-			LabelSelector: fields.OneTermEqualSelector("app", operatorTestDeploymentName).String(),
-		})
-		if err != nil {
-			return err
-		}
-		for _, pod := range pods.Items {
-			for _, c := range pod.Spec.Containers {
-				if c.Name == "operator" {
-					if c.Image != image {
-						return fmt.Errorf("in pod %s found image %s, expected %s", pod.Name, c.Image, image)
-					}
-				}
-			}
-		}
-		return nil
-	}, deploymentReadyTimeout)
-}
diff --git a/tests/pc_test.go b/tests/pc_test.go
deleted file mode 100644
index 0c32be1e5..000000000
--- a/tests/pc_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package tests
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/util"
-	"github.com/arangodb/kube-arangodb/pkg/util/retry"
-	"github.com/dchest/uniuri"
-	v1beta1 "k8s.io/api/scheduling/v1beta1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-
-	"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
-)
-
-func waitForPriorityOfServerGroup(kube kubernetes.Interface, c versioned.Interface, depl, ns string, group api.ServerGroup, priority int32) error {
-	return retry.Retry(func() error {
-
-		apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl, metav1.GetOptions{})
-		if err != nil {
-			return err
-		}
-
-		for _, m := range apiObject.Status.Members.MembersOfGroup(group) {
-			pod, err := kube.CoreV1().Pods(apiObject.Namespace).Get(context.Background(), m.PodName, metav1.GetOptions{})
-			if err != nil {
-				return err
-			}
-
-			if pod.Spec.Priority == nil {
-				return fmt.Errorf("No pod priority set")
-			}
-
-			if *pod.Spec.Priority != priority {
-				return fmt.Errorf("Wrong pod priority, expected %d, found %d", priority, *pod.Spec.Priority)
-			}
-		}
-
-		return nil
-	}, 5*time.Minute)
-}
-
-// TestPriorityClasses creates a PriorityClass and associates coordinators with that class.
-// It then checks that the pods have the desired priority, changes the class and checks that the pods are rotated.
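-// A changed PriorityClassName only takes effect after the affected pods have
-// been rotated; the priority itself is verified via pod.Spec.Priority in
-// waitForPriorityOfServerGroup.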
-func TestPriorityClasses(t *testing.T) {
-	longOrSkip(t)
-	c := client.MustNewClient()
-	kubecli := mustNewKubeClient(t)
-	ns := getNamespace(t)
-
-	lowClassName := "test-low-class"
-	lowClassValue := int32(1000)
-
-	highClassName := "test-high-class"
-	highClassValue := int32(2000)
-
-	// Create two priority classes
-	if _, err := kubecli.SchedulingV1beta1().PriorityClasses().Create(context.Background(), &v1beta1.PriorityClass{
-		Value:         lowClassValue,
-		GlobalDefault: false,
-		Description:   "Low priority test class",
-		ObjectMeta: metav1.ObjectMeta{
-			Name: lowClassName,
-		},
-	}, metav1.CreateOptions{}); err != nil {
-		t.Fatalf("Could not create PC: %v", err)
-	}
-	defer kubecli.SchedulingV1beta1().PriorityClasses().Delete(context.Background(), lowClassName, metav1.DeleteOptions{})
-
-	if _, err := kubecli.SchedulingV1beta1().PriorityClasses().Create(context.Background(), &v1beta1.PriorityClass{
-		Value:         highClassValue,
-		GlobalDefault: false,
-		Description:   "High priority test class",
-		ObjectMeta: metav1.ObjectMeta{
-			Name: highClassName,
-		},
-	}, metav1.CreateOptions{}); err != nil {
-		t.Fatalf("Could not create PC: %v", err)
-	}
-	defer kubecli.SchedulingV1beta1().PriorityClasses().Delete(context.Background(), highClassName, metav1.DeleteOptions{})
-
-	// Prepare deployment config
-	depl := newDeployment("test-pc-" + uniuri.NewLen(4))
-	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-	depl.Spec.TLS = api.TLSSpec{CASecretName: util.NewString("None")}
-	depl.Spec.DBServers.Count = util.NewInt(2)
-	depl.Spec.Coordinators.Count = util.NewInt(2)
-	depl.Spec.Coordinators.PriorityClassName = lowClassName
-	depl.Spec.SetDefaults(depl.GetName()) // this must be last
-	defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-	// Create deployment
-	_, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-	if err != nil {
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-	// Wait for deployment to be ready
-	_, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-	if err != nil {
-		t.Fatalf("Deployment not running in time: %v", err)
-	}
-
-	if err := waitForPriorityOfServerGroup(kubecli, c, depl.GetName(), ns, api.ServerGroupCoordinators, lowClassValue); err != nil {
-		t.Errorf("Priority not as expected: %v", err)
-	}
-
-	_, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-		spec.Coordinators.PriorityClassName = highClassName
-	})
-	if err != nil {
-		t.Fatalf("Failed to update deployment: %v", err)
-	}
-
-	// Check if priority class is updated
-	if err := waitForPriorityOfServerGroup(kubecli, c, depl.GetName(), ns, api.ServerGroupCoordinators, highClassValue); err != nil {
-		t.Errorf("Priority not as expected: %v", err)
-	}
-
-	removeDeployment(c, depl.GetName(), ns)
-}
diff --git a/tests/pdb_test.go b/tests/pdb_test.go
deleted file mode 100644
index 40b283214..000000000
--- a/tests/pdb_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package tests
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
-	"github.com/arangodb/kube-arangodb/pkg/util"
-	"github.com/arangodb/kube-arangodb/pkg/util/retry"
-	"github.com/dchest/uniuri"
-	"github.com/stretchr/testify/assert"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-)
-
-func min(a int,
diff --git a/tests/pdb_test.go b/tests/pdb_test.go
deleted file mode 100644
index 40b283214..000000000
--- a/tests/pdb_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package tests
-
-import (
-    "context"
-    "fmt"
-    "testing"
-    "time"
-
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/arangodb/kube-arangodb/pkg/client"
-    "github.com/arangodb/kube-arangodb/pkg/deployment/resources"
-    "github.com/arangodb/kube-arangodb/pkg/util"
-    "github.com/arangodb/kube-arangodb/pkg/util/retry"
-    "github.com/dchest/uniuri"
-    "github.com/stretchr/testify/assert"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/kubernetes"
-)
-
-func min(a int, b int) int {
-    if a < b {
-        return a
-    }
-    return b
-}
-
-func isPDBAsExpected(kube kubernetes.Interface, name, ns string, expectedMinAvailable int) error {
-    pdb, err := kube.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.Background(), name, metav1.GetOptions{})
-    if err != nil {
-        return err
-    }
-    if pdb.Spec.MinAvailable.IntValue() != expectedMinAvailable {
-        return fmt.Errorf("PDB %s does not have expected minAvailable, found: %d, expected: %d", name, pdb.Spec.MinAvailable.IntValue(), expectedMinAvailable)
-    }
-    return nil
-}
-
-func waitForPDBsOfDeployment(kube kubernetes.Interface, apiObject *api.ArangoDeployment) error {
-    spec := apiObject.Spec
-    return retry.Retry(func() error {
-        if spec.Mode.HasAgents() {
-            if err := isPDBAsExpected(kube, resources.PDBNameForGroup(apiObject.GetName(), api.ServerGroupAgents), apiObject.GetNamespace(), spec.GetServerGroupSpec(api.ServerGroupAgents).GetCount()-1); err != nil {
-                return err
-            }
-        }
-        if spec.Mode.HasCoordinators() {
-            if err := isPDBAsExpected(kube, resources.PDBNameForGroup(apiObject.GetName(), api.ServerGroupCoordinators), apiObject.GetNamespace(),
-                min(spec.GetServerGroupSpec(api.ServerGroupCoordinators).GetCount()-1, 2)); err != nil {
-                return err
-            }
-        }
-        if spec.Mode.HasDBServers() {
-            if err := isPDBAsExpected(kube, resources.PDBNameForGroup(apiObject.GetName(), api.ServerGroupDBServers), apiObject.GetNamespace(), spec.GetServerGroupSpec(api.ServerGroupDBServers).GetCount()-1); err != nil {
-                return err
-            }
-        }
-        return nil
-    }, 20*time.Second)
-}
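waitForPDBsOfDeployment reduces to reading each group's PodDisruptionBudget and comparing spec.minAvailable. A standalone sketch of that check against the newer policy/v1 API (the deleted code used policy/v1beta1; assumes client-go v0.21+):

// pdbMinAvailable returns spec.minAvailable of the named PDB as an int.
func pdbMinAvailable(cli kubernetes.Interface, ns, name string) (int, error) {
    pdb, err := cli.PolicyV1().PodDisruptionBudgets(ns).Get(context.Background(), name, metav1.GetOptions{})
    if err != nil {
        return 0, err
    }
    // Guard against a nil pointer; minAvailable is optional in the PDB spec.
    if pdb.Spec.MinAvailable == nil {
        return 0, fmt.Errorf("PDB %s has no minAvailable", name)
    }
    return pdb.Spec.MinAvailable.IntValue(), nil
}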
-
-// TestPDBCreate creates a deployment and checks that the PDBs are created. It then rescales the cluster and checks that the PDBs are
-// modified accordingly.
-func TestPDBCreate(t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-
-    // Prepare deployment config
-    depl := newDeployment("test-pdb-create-" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.Environment = api.NewEnvironment(api.EnvironmentProduction)
-    depl.Spec.TLS = api.TLSSpec{CASecretName: util.NewString("None")}
-    depl.Spec.DBServers.Count = util.NewInt(2)
-    depl.Spec.Coordinators.Count = util.NewInt(2)
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // This test fails to validate the spec if no image is set explicitly, because an explicit image is required in production mode
-    if depl.Spec.Image == nil {
-        depl.Spec.Image = util.NewString("arangodb/arangodb:latest")
-    }
-    assert.NoError(t, depl.Spec.Validate())
-
-    // Create deployment
-    _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-    if err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    if err := waitForPDBsOfDeployment(kubecli, apiObject); err != nil {
-        t.Errorf("PDBs not as expected: %v", err)
-    }
-
-    apiObject, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-        spec.DBServers.Count = util.NewInt(3)
-        spec.Coordinators.Count = util.NewInt(3)
-    })
-    if err != nil {
-        t.Fatalf("Failed to update deployment: %v", err)
-    }
-
-    // Wait for deployment to be ready
-    if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-    // Check if the PDBs have grown too
-    if err := waitForPDBsOfDeployment(kubecli, apiObject); err != nil {
-        t.Errorf("PDBs not as expected: %v", err)
-    }
-
-    removeDeployment(c, depl.GetName(), ns)
-}
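TestPDBCreate leans on the updateDeployment helper from the removed test_util.go. The pattern behind such a helper is the usual get-mutate-update loop retried on resource-version conflicts; a sketch under that assumption (the clientset type and import path are illustrative):

// updateArangoDeployment applies update() to the spec and writes it back,
// retrying while the API server reports a conflicting resource version.
func updateArangoDeployment(c versioned.Interface, name, ns string, update func(*api.DeploymentSpec)) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        current, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        update(&current.Spec)
        _, err = c.DatabaseV1().ArangoDeployments(ns).Update(context.Background(), current, metav1.UpdateOptions{})
        return err
    })
}

Here retry is k8s.io/client-go/util/retry, whose RetryOnConflict is the standard tool for this loop.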
diff --git a/tests/persistent_volumes_test.go b/tests/persistent_volumes_test.go
deleted file mode 100644
index bf2bf75f7..000000000
--- a/tests/persistent_volumes_test.go
+++ /dev/null
@@ -1,488 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Jan Christoph Uhde
-//
-package tests
-
-import (
-    "context"
-    "fmt"
-    "strings"
-    "testing"
-    "time"
-
-    "k8s.io/client-go/kubernetes"
-
-    storagev1 "k8s.io/api/storage/v1"
-
-    "github.com/arangodb/kube-arangodb/pkg/util"
-
-    "github.com/stretchr/testify/require"
-
-    "github.com/arangodb/arangosync-client/pkg/retry"
-
-    "github.com/dchest/uniuri"
-    "github.com/stretchr/testify/assert"
-
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
-    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    //"github.com/arangodb/kube-arangodb/pkg/util"
-)
-
-// TODO - add description
-func TestPVCExists(t *testing.T) {
-    longOrSkip(t)
-
-    k8sNameSpace := getNamespace(t)
-    //k8sClient := mustNewKubeClient(t)
-
-    // volumesList, err := k8sClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
-    // assert.NoError(t, err, "error while listing volumes")
-    // claimsList, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).List(metav1.ListOptions{})
-    // assert.NoError(t, err, "error while listing volume claims")
-
-    // fmt.Printf("----------------------------------------")
-    // fmt.Printf("%v %v", volumesList, claimsList)
-    // fmt.Printf("----------------------------------------")
-    // fmt.Printf("%v %v", len(volumesList.Items), len(claimsList.Items))
-    // fmt.Printf("----------------------------------------")
-
-    mode := api.DeploymentModeCluster
-    engine := api.StorageEngineRocksDB
-
-    deploymentClient := kubeArangoClient.MustNewClient()
-    deploymentTemplate := newDeployment(strings.Replace(fmt.Sprintf("tpers-%s-%s-%s", mode[:2], engine[:2], uniuri.NewLen(4)), ".", "", -1))
-    deploymentTemplate.Spec.Mode = api.NewMode(mode)
-    deploymentTemplate.Spec.StorageEngine = api.NewStorageEngine(engine)
-    deploymentTemplate.Spec.TLS = api.TLSSpec{}
-    //deploymentTemplate.Spec.Environment = api.NewEnvironment(api.EnvironmentDevelopment)
-    //deploymentTemplate.Spec.Image = util.NewString("arangodb/arangodb:3.3.4")
-    //deploymentTemplate.Spec.DBServers.Count = util.NewInt(numNodes + 1)
-    deploymentTemplate.Spec.SetDefaults(deploymentTemplate.GetName()) // this must be last
-    assert.NoError(t, deploymentTemplate.Spec.Validate())
-
-    // Create deployment
-    _, err := deploymentClient.DatabaseV1().ArangoDeployments(k8sNameSpace).Create(context.Background(), deploymentTemplate, metav1.CreateOptions{})
-    assert.NoError(t, err, "failed to create deployment: %s", err)
-
-    _, err = waitUntilDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace, deploymentIsReady())
-    assert.NoError(t, err, fmt.Sprintf("Deployment not running in time: %s", err)) // <-- fails here at the moment
-
-    // TODO - add tests that check the number of volumes and claims
-
-    // Cleanup
-    removeDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace)
-}
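The size assertions in the resize tests that follow all read the claim's requested storage in the same way; a compact sketch of that read (against the client-go vintage used here, where a PVC's Resources field is a corev1.ResourceRequirements):

// pvcRequestedStorage returns the storage request of the named PVC.
func pvcRequestedStorage(cli kubernetes.Interface, ns, name string) (resource.Quantity, error) {
    pvc, err := cli.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), name, metav1.GetOptions{})
    if err != nil {
        return resource.Quantity{}, err
    }
    qty, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
    if !ok {
        return resource.Quantity{}, fmt.Errorf("PVC %s has no storage request", name)
    }
    return qty, nil
}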
-
-func TestPVCResize(t *testing.T) {
-    longOrSkip(t)
-
-    k8sNameSpace := getNamespace(t)
-    k8sClient := mustNewKubeClient(t)
-
-    mode := api.DeploymentModeCluster
-    engine := api.StorageEngineRocksDB
-
-    size10GB, _ := resource.ParseQuantity("10Gi")
-    size08GB, _ := resource.ParseQuantity("8Gi")
-
-    deploymentClient := kubeArangoClient.MustNewClient()
-    deploymentTemplate := newDeployment(strings.Replace(fmt.Sprintf("trsz-%s-%s-%s", mode[:2], engine[:2], uniuri.NewLen(4)), ".", "", -1))
-    deploymentTemplate.Spec.Mode = api.NewMode(mode)
-    deploymentTemplate.Spec.StorageEngine = api.NewStorageEngine(engine)
-    deploymentTemplate.Spec.TLS = api.TLSSpec{}
-    deploymentTemplate.Spec.DBServers.Resources.Requests = corev1.ResourceList{corev1.ResourceStorage: size08GB}
-    deploymentTemplate.Spec.SetDefaults(deploymentTemplate.GetName()) // this must be last
-    assert.NoError(t, deploymentTemplate.Spec.Validate())
-
-    // Create deployment
-    _, err := deploymentClient.DatabaseV1().ArangoDeployments(k8sNameSpace).Create(context.Background(), deploymentTemplate, metav1.CreateOptions{})
-    defer removeDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace)
-    assert.NoError(t, err, "failed to create deployment: %s", err)
-
-    depl, err := waitUntilDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace, deploymentIsReady())
-    assert.NoError(t, err, fmt.Sprintf("Deployment not running in time: %s", err))
-
-    // Get list of all pvcs for dbservers
-    for _, m := range depl.Status.Members.DBServers {
-        pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-        assert.NoError(t, err, "failed to get pvc: %s", err)
-        volumeSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
-        assert.True(t, ok, "pvc does not have storage resource")
-        assert.True(t, volumeSize.Cmp(size08GB) == 0, "wrong volume size: expected: %s, found: %s", size08GB.String(), volumeSize.String())
-    }
-
-    // Update the deployment
-    // Increase the requested storage size
-    depl, err = updateDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace,
-        func(depl *api.DeploymentSpec) {
-            depl.DBServers.Resources.Requests[corev1.ResourceStorage] = size10GB
-        })
-    if err != nil {
-        t.Fatalf("Failed to update the deployment")
-    } else {
-        t.Log("Updated deployment")
-    }
-
-    if err := retry.Retry(func() error {
-        // Get list of all pvcs for dbservers and check for new size
-        for _, m := range depl.Status.Members.DBServers {
-            pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            volumeSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
-            if !ok {
-                return fmt.Errorf("pvc does not have storage resource")
-            }
-            if volumeSize.Cmp(size10GB) != 0 {
-                return fmt.Errorf("wrong pvc size: expected: %s, found: %s", size10GB.String(), volumeSize.String())
-            }
-            volume, err := k8sClient.CoreV1().PersistentVolumes().Get(context.Background(), pvc.Spec.VolumeName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            volumeSize, ok = volume.Spec.Capacity[corev1.ResourceStorage]
-            if !ok {
-                return fmt.Errorf("pv does not have storage resource")
-            }
-            if volumeSize.Cmp(size10GB) != 0 {
-                return fmt.Errorf("wrong volume size: expected: %s, found: %s", size10GB.String(), volumeSize.String())
-            }
-            if k8sutil.IsPersistentVolumeClaimFileSystemResizePending(pvc) {
-                return fmt.Errorf("persistent volume claim file system resize pending")
-            }
-        }
-        return nil
-    }, 5*time.Minute); err != nil {
-        t.Fatalf("PVCs not resized: %s", err.Error())
-    }
-
-}
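Outside the operator, the same expansion can be triggered by patching the claim directly, provided the backing StorageClass has allowVolumeExpansion enabled; a sketch (helper name is illustrative):

// expandPVC patches the claim's storage request to newSize.
func expandPVC(cli kubernetes.Interface, ns, name string, newSize resource.Quantity) error {
    patch := []byte(fmt.Sprintf(`{"spec":{"resources":{"requests":{"storage":"%s"}}}}`, newSize.String()))
    _, err := cli.CoreV1().PersistentVolumeClaims(ns).Patch(context.Background(), name, types.MergePatchType, patch, metav1.PatchOptions{})
    return err
}

types here is k8s.io/apimachinery/pkg/types; a merge patch keeps the rest of the claim untouched.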
-
-func TestPVCTemplateResize(t *testing.T) {
-    longOrSkip(t)
-
-    k8sNameSpace := getNamespace(t)
-    k8sClient := mustNewKubeClient(t)
-
-    mode := api.DeploymentModeCluster
-    engine := api.StorageEngineRocksDB
-
-    size10GB, _ := resource.ParseQuantity("10Gi")
-    size08GB, _ := resource.ParseQuantity("8Gi")
-
-    deploymentClient := kubeArangoClient.MustNewClient()
-    deploymentTemplate := newDeployment(strings.Replace(fmt.Sprintf("trsz-%s-%s-%s", mode[:2], engine[:2], uniuri.NewLen(4)), ".", "", -1))
-    deploymentTemplate.Spec.Mode = api.NewMode(mode)
-    deploymentTemplate.Spec.StorageEngine = api.NewStorageEngine(engine)
-    deploymentTemplate.Spec.TLS = api.TLSSpec{}
-    deploymentTemplate.Spec.SetDefaults(deploymentTemplate.GetName()) // this must be last
-    assert.NoError(t, deploymentTemplate.Spec.Validate())
-    assert.NotNil(t, deploymentTemplate.Spec.DBServers.VolumeClaimTemplate)
-    deploymentTemplate.Spec.DBServers.VolumeClaimTemplate.Spec.Resources.Requests[corev1.ResourceStorage] = size08GB
-
-    // Create deployment
-    _, err := deploymentClient.DatabaseV1().ArangoDeployments(k8sNameSpace).Create(context.Background(), deploymentTemplate, metav1.CreateOptions{})
-    defer removeDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace)
-    assert.NoError(t, err, "failed to create deployment: %s", err)
-
-    depl, err := waitUntilDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace, deploymentIsReady())
-    assert.NoError(t, err, fmt.Sprintf("Deployment not running in time: %s", err))
-
-    // Get list of all pvcs for dbservers
-    for _, m := range depl.Status.Members.DBServers {
-        pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-        assert.NoError(t, err, "failed to get pvc: %s", err)
-        volumeSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
-        assert.True(t, ok, "pvc does not have storage resource")
-        assert.True(t, volumeSize.Cmp(size08GB) == 0, "wrong volume size: expected: %s, found: %s", size08GB.String(), volumeSize.String())
-    }
-
-    // Update the deployment
-    // Increase the requested storage size in the volume claim template
-    depl, err = updateDeployment(deploymentClient, deploymentTemplate.GetName(), k8sNameSpace,
-        func(depl *api.DeploymentSpec) {
-            depl.DBServers.VolumeClaimTemplate.Spec.Resources.Requests[corev1.ResourceStorage] = size10GB
-        })
-    if err != nil {
-        t.Fatalf("Failed to update the deployment")
-    } else {
-        t.Log("Updated deployment")
-    }
-
-    if err := retry.Retry(func() error {
-        // Get list of all pvcs for dbservers and check for new size
-        for _, m := range depl.Status.Members.DBServers {
-            pvc, err := k8sClient.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            volumeSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
-            if !ok {
-                return fmt.Errorf("pvc does not have storage resource")
-            }
-            if volumeSize.Cmp(size10GB) != 0 {
-                return fmt.Errorf("wrong pvc size: expected: %s, found: %s", size10GB.String(), volumeSize.String())
-            }
-            volume, err := k8sClient.CoreV1().PersistentVolumes().Get(context.Background(), pvc.Spec.VolumeName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            volumeSize, ok = volume.Spec.Capacity[corev1.ResourceStorage]
-            if !ok {
-                return fmt.Errorf("pv does not have storage resource")
-            }
-            if volumeSize.Cmp(size10GB) != 0 {
-                return fmt.Errorf("wrong volume size: expected: %s, found: %s", size10GB.String(), volumeSize.String())
-            }
-            if k8sutil.IsPersistentVolumeClaimFileSystemResizePending(pvc) {
-                return fmt.Errorf("persistent volume claim file system resize pending")
-            }
-        }
-        return nil
-    }, 5*time.Minute); err != nil {
-        t.Fatalf("PVCs not resized: %s", err.Error())
-    }
-
-}
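k8sutil.IsPersistentVolumeClaimFileSystemResizePending, used in both resize tests, inspects the claim's status conditions. A plausible standalone equivalent of that predicate (this is a reconstruction, not the operator's actual implementation):

// fsResizePending reports whether the claim still carries a
// FileSystemResizePending condition with status True.
func fsResizePending(pvc *corev1.PersistentVolumeClaim) bool {
    for _, c := range pvc.Status.Conditions {
        if c.Type == corev1.PersistentVolumeClaimFileSystemResizePending && c.Status == corev1.ConditionTrue {
            return true
        }
    }
    return false
}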
"collectionTest", 3, 200) - documentGenerator.generate(t, func(documentIndex int) interface{} { - type oneValue struct { - value int - } - return &oneValue{value: documentIndex} - }) - - // Update deployment - _, err = updateDeployment(arangoClient, depl.GetName(), k8sNameSpace, func(spec *api.DeploymentSpec) { - spec.DBServers.VolumeClaimTemplate.Spec.StorageClassName = util.NewString(newStorageClassName) - }) - require.NoError(t, err, "failed to update deployment: %s", err) - - // Check for updated deployment - isStorageChanged := func(deployment *api.ArangoDeployment) error { - pvc := deployment.Spec.DBServers.VolumeClaimTemplate - if pvc == nil { - return fmt.Errorf("persistant volume claim can not be nil") - } - if pvc.Spec.StorageClassName == nil { - return fmt.Errorf("storage class name can not be nil") - } - if *pvc.Spec.StorageClassName != newStorageClassName { - return fmt.Errorf("storage class name has not been changed") - } - - for _, server := range deployment.Status.Members.DBServers { - pvc, err := kubecli.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(context.Background(), server.PersistentVolumeClaimName, metav1.GetOptions{}) - if err != nil { - return err - } - if pvc.Spec.StorageClassName == nil { - return fmt.Errorf("storage class name can not be nil") - } - if *pvc.Spec.StorageClassName != newStorageClassName { - return fmt.Errorf("storage class name has not been chagned") - } - } - return nil - } - - depl, err = waitUntilDeployment(arangoClient, depl.GetName(), k8sNameSpace, isStorageChanged, time.Minute*5) - require.NoError(t, err, "failed to change storage class for db servers: %s", err) - - // Check if documents are the same in the new storage - documentGenerator.check(t) - - // Cleanup - removeDeployment(arangoClient, depl.GetName(), k8sNameSpace) -} - -// Test deprecated functionality for changing storage class -func TestPVCChangeStorageDeprecated(t *testing.T) { - longOrSkip(t) - - k8sNameSpace := getNamespace(t) - arangoClient := kubeArangoClient.MustNewClient() - kubecli := mustNewKubeClient(t) - mode := api.DeploymentModeCluster - - defaultStorageClass := getDefaultStorageClassOrDie(t, kubecli) - randomString := strings.ToLower(uniuri.NewLen(4)) - newStorageClassName := defaultStorageClass.GetName() + randomString - - newStorage := defaultStorageClass.DeepCopy() - newStorage.ObjectMeta = metav1.ObjectMeta{ - Name: newStorageClassName, - } - newStorage, err := kubecli.StorageV1().StorageClasses().Create(context.Background(), newStorage, metav1.CreateOptions{}) - require.NoError(t, err) - defer func() { - err := kubecli.StorageV1().StorageClasses().Delete(context.Background(), newStorage.Name, metav1.DeleteOptions{}) - assert.NoError(t, err) - }() - - name := strings.Replace(fmt.Sprintf("tcs-%s-%s", mode[:2], randomString), ".", "", -1) - depl, err := newDeploymentWithValidation(name, func(deployment *api.ArangoDeployment) { - var agentsCount, coordinatorCount, DBServersCount = 3, 2, 3 - - deployment.Spec.Mode = api.NewMode(mode) - deployment.Spec.Environment = api.NewEnvironment(api.EnvironmentProduction) - - deployment.Spec.DBServers.Resources.Requests = map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceStorage: resource.MustParse("2Gi"), - } - deployment.Spec.DBServers.StorageClassName = util.NewString(defaultStorageClass.Name) - deployment.Spec.DBServers.Count = util.NewInt(DBServersCount) - deployment.Spec.Agents.Count = util.NewInt(agentsCount) - deployment.Spec.Coordinators.Count = util.NewInt(coordinatorCount) - }) - require.NoError(t, err) 
-
-// Test deprecated functionality for changing storage class
-func TestPVCChangeStorageDeprecated(t *testing.T) {
-    longOrSkip(t)
-
-    k8sNameSpace := getNamespace(t)
-    arangoClient := kubeArangoClient.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    mode := api.DeploymentModeCluster
-
-    defaultStorageClass := getDefaultStorageClassOrDie(t, kubecli)
-    randomString := strings.ToLower(uniuri.NewLen(4))
-    newStorageClassName := defaultStorageClass.GetName() + randomString
-
-    newStorage := defaultStorageClass.DeepCopy()
-    newStorage.ObjectMeta = metav1.ObjectMeta{
-        Name: newStorageClassName,
-    }
-    newStorage, err := kubecli.StorageV1().StorageClasses().Create(context.Background(), newStorage, metav1.CreateOptions{})
-    require.NoError(t, err)
-    defer func() {
-        err := kubecli.StorageV1().StorageClasses().Delete(context.Background(), newStorage.Name, metav1.DeleteOptions{})
-        assert.NoError(t, err)
-    }()
-
-    name := strings.Replace(fmt.Sprintf("tcs-%s-%s", mode[:2], randomString), ".", "", -1)
-    depl, err := newDeploymentWithValidation(name, func(deployment *api.ArangoDeployment) {
-        var agentsCount, coordinatorCount, DBServersCount = 3, 2, 3
-
-        deployment.Spec.Mode = api.NewMode(mode)
-        deployment.Spec.Environment = api.NewEnvironment(api.EnvironmentProduction)
-
-        deployment.Spec.DBServers.Resources.Requests = map[corev1.ResourceName]resource.Quantity{
-            corev1.ResourceStorage: resource.MustParse("2Gi"),
-        }
-        deployment.Spec.DBServers.StorageClassName = util.NewString(defaultStorageClass.Name)
-        deployment.Spec.DBServers.Count = util.NewInt(DBServersCount)
-        deployment.Spec.Agents.Count = util.NewInt(agentsCount)
-        deployment.Spec.Coordinators.Count = util.NewInt(coordinatorCount)
-    })
-    require.NoError(t, err)
-
-    // Create deployment
-    _, err = arangoClient.DatabaseV1().ArangoDeployments(k8sNameSpace).Create(context.Background(), depl, metav1.CreateOptions{})
-    require.NoError(t, err, "failed to create deployment: %s", err)
-    defer deferedCleanupDeployment(arangoClient, depl.GetName(), k8sNameSpace)
-
-    depl, err = waitUntilDeployment(arangoClient, depl.GetName(), k8sNameSpace, deploymentIsReady())
-    require.NoError(t, err, fmt.Sprintf("Deployment not running in time: %s", err))
-
-    // Fill collection with documents
-    documentGenerator := NewDocumentGenerator(kubecli, depl, "collectionTest", 3, 200)
-    documentGenerator.generate(t, func(documentIndex int) interface{} {
-        type oneValue struct {
-            value int
-        }
-        return &oneValue{value: documentIndex}
-    })
-
-    // Update deployment
-    _, err = updateDeployment(arangoClient, depl.GetName(), k8sNameSpace, func(spec *api.DeploymentSpec) {
-        spec.DBServers.StorageClassName = util.NewString(newStorageClassName)
-    })
-    require.NoError(t, err, "failed to update deployment: %s", err)
-
-    // Check for updated deployment
-    isDeprecatedStorageChanged := func(deployment *api.ArangoDeployment) error {
-        for _, server := range deployment.Status.Members.DBServers {
-            pvc, err := kubecli.CoreV1().PersistentVolumeClaims(k8sNameSpace).Get(context.Background(), server.PersistentVolumeClaimName, metav1.GetOptions{})
-            if err != nil {
-                return err
-            }
-            if pvc.Spec.StorageClassName == nil {
-                return fmt.Errorf("storage class name can not be nil")
-            }
-            if *pvc.Spec.StorageClassName != newStorageClassName {
-                return fmt.Errorf("storage class name has not been changed")
-            }
-        }
-        return nil
-    }
-
-    depl, err = waitUntilDeployment(arangoClient, depl.GetName(), k8sNameSpace, isDeprecatedStorageChanged, time.Minute*5)
-    require.NoError(t, err, "failed to change storage class for db servers: %s", err)
-
-    // Check if documents are the same in the new storage
-    documentGenerator.check(t)
-
-    // Cleanup
-    removeDeployment(arangoClient, depl.GetName(), k8sNameSpace)
-}
-
-func getDefaultStorageClassOrDie(t *testing.T, kubecli kubernetes.Interface) *storagev1.StorageClass {
-    var defaultStorageClass *storagev1.StorageClass
-    storageClasses, err := kubecli.StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
-    require.NoError(t, err)
-
-    for _, sc := range storageClasses.Items {
-        if k8sutil.StorageClassIsDefault(&sc) {
-            defaultStorageClass = &sc
-            break
-        }
-    }
-    require.NotNilf(t, defaultStorageClass, "test needs default storage class")
-    return defaultStorageClass
-}
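getDefaultStorageClassOrDie depends on k8sutil.StorageClassIsDefault. Kubernetes marks the default class with an annotation, so a plausible standalone version of that predicate looks like this (there is also a legacy beta annotation on older clusters, which this sketch ignores):

// isDefaultStorageClass reports whether sc carries the default-class annotation.
func isDefaultStorageClass(sc *storagev1.StorageClass) bool {
    return sc.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true"
}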
diff --git a/tests/predicates.go b/tests/predicates.go
deleted file mode 100644
index 21e89a8c7..000000000
--- a/tests/predicates.go
+++ /dev/null
@@ -1,72 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-    "context"
-    "fmt"
-
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/kubernetes"
-)
-
-// deploymentIsReady creates a predicate that returns nil when the deployment is in
-// the running phase and the `Ready` condition is true.
-func deploymentIsReady() func(*api.ArangoDeployment) error {
-    return func(obj *api.ArangoDeployment) error {
-        if obj.Status.Phase != api.DeploymentPhaseRunning {
-            return fmt.Errorf("Expected Running phase, got %s", obj.Status.Phase)
-        }
-        if obj.Status.Conditions.IsTrue(api.ConditionTypeReady) {
-            return nil
-        }
-        return fmt.Errorf("Expected Ready condition to be set, it is not")
-    }
-}
-
-func resourcesAsRequested(kubecli kubernetes.Interface, ns string) func(obj *api.ArangoDeployment) error {
-    return func(obj *api.ArangoDeployment) error {
-        return obj.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
-
-            for _, m := range *status {
-                pod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), m.PodName, metav1.GetOptions{})
-                if err != nil {
-                    return err
-                }
-
-                c, found := k8sutil.GetContainerByName(pod, k8sutil.ServerContainerName)
-                if !found {
-                    return fmt.Errorf("Container not found: %s", m.PodName)
-                }
-
-                if k8sutil.IsResourceRequirementsChanged(spec.Resources, c.Resources) {
-                    return fmt.Errorf("Container of Pod %s need rotation", m.PodName)
-                }
-            }
-
-            return nil
-        }, nil)
-    }
-}
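Predicates like these compose with a small poll loop. The real waitUntilDeployment lived in the removed test_util.go, so the following is only an approximation of the wait helper they are fed into, built on the same retry.Retry(op, timeout) utility used throughout these tests:

// waitForDeployment polls the deployment until predicate returns nil or the timeout expires.
func waitForDeployment(c versioned.Interface, name, ns string, predicate func(*api.ArangoDeployment) error, timeout time.Duration) (*api.ArangoDeployment, error) {
    var result *api.ArangoDeployment
    op := func() error {
        current, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if err := predicate(current); err != nil {
            return err
        }
        result = current
        return nil
    }
    if err := retry.Retry(op, timeout); err != nil {
        return nil, err
    }
    return result, nil
}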
diff --git a/tests/prepull_image_util.go b/tests/prepull_image_util.go
deleted file mode 100644
index 5da7360b7..000000000
--- a/tests/prepull_image_util.go
+++ /dev/null
@@ -1,101 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-    "context"
-    "crypto/sha1"
-    "fmt"
-    "strings"
-    "time"
-
-    "github.com/dchest/uniuri"
-    appsv1 "k8s.io/api/apps/v1"
-    corev1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/kubernetes"
-
-    "github.com/arangodb/kube-arangodb/pkg/util/retry"
-)
-
-// prepullArangoImage runs a daemonset that pulls a given ArangoDB image on all nodes.
-func prepullArangoImage(cli kubernetes.Interface, image, namespace string) error {
-    name := "prepuller-" + strings.ToLower(uniuri.NewLen(4))
-    dsLabels := map[string]string{
-        "app":        "prepuller",
-        "image-hash": fmt.Sprintf("%0x", sha1.Sum([]byte(image)))[:10],
-    }
-    ds := &appsv1.DaemonSet{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:   name,
-            Labels: dsLabels,
-        },
-        Spec: appsv1.DaemonSetSpec{
-            Selector: &metav1.LabelSelector{
-                MatchLabels: dsLabels,
-            },
-            Template: corev1.PodTemplateSpec{
-                ObjectMeta: metav1.ObjectMeta{
-                    Labels: dsLabels,
-                },
-                Spec: corev1.PodSpec{
-                    Containers: []corev1.Container{
-                        corev1.Container{
-                            Name:  "arango",
-                            Image: image,
-                            Env: []corev1.EnvVar{
-                                corev1.EnvVar{
-                                    Name:  "ARANGO_NO_AUTH",
-                                    Value: "1",
-                                },
-                            },
-                        },
-                    },
-                },
-            },
-        },
-    }
-    // Create DS
-    if _, err := cli.AppsV1().DaemonSets(namespace).Create(context.Background(), ds, metav1.CreateOptions{}); err != nil {
-        return maskAny(err)
-    }
-    // Cleanup on exit
-    defer func() {
-        cli.AppsV1().DaemonSets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{})
-    }()
-    // Now wait for it to be ready
-    op := func() error {
-        current, err := cli.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
-        if err != nil {
-            return maskAny(err)
-        }
-        if current.Status.DesiredNumberScheduled > current.Status.NumberReady {
-            return maskAny(fmt.Errorf("Expected %d pods to be ready, got %d", current.Status.DesiredNumberScheduled, current.Status.NumberReady))
-        }
-        return nil
-    }
-    if err := retry.Retry(op, time.Hour); err != nil {
-        return maskAny(err)
-    }
-    return nil
-}
diff --git a/tests/resilience_test.go b/tests/resilience_test.go
deleted file mode 100644
index 338e1c639..000000000
--- a/tests/resilience_test.go
+++ /dev/null
@@ -1,435 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-    "context"
-    "fmt"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/require"
-
-    "github.com/dchest/uniuri"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-
-    driver "github.com/arangodb/go-driver"
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/arangodb/kube-arangodb/pkg/client"
-    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    "github.com/arangodb/kube-arangodb/pkg/util/retry"
-)
-
-// TestResiliencePod
-// Tests handling of individual pod deletions
-func TestResiliencePod(t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-
-    //fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
-
-    // Prepare deployment config
-    depl := newDeployment("test-pod-resilience-" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-    // Create deployment
-    apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    // Create a database client
-    ctx := context.Background()
-    client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-    // Wait for cluster to be completely ready
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, apiObject.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running in expected health in time: %v", err)
-    }
-
-    // Fetch latest status so we know all member details
-    apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{})
-    if err != nil {
-        t.Fatalf("Failed to get deployment: %v", err)
-    }
-
-    // Delete one pod after the other
-    apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
-        for _, m := range *status {
-            // Get current pod so we can compare UID later
-            originalPod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), m.PodName, metav1.GetOptions{})
-            if err != nil {
-                t.Fatalf("Failed to get pod %s: %v", m.PodName, err)
-            }
-            // Get current PVC so we can compare UID later
-            var originalPVCUID types.UID
-            if m.PersistentVolumeClaimName != "" {
-                originalPVC, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-                if err != nil {
-                    t.Fatalf("Failed to get PVC %s: %v", m.PersistentVolumeClaimName, err)
-                } else {
-                    originalPVCUID = originalPVC.GetUID()
-                }
-            }
-            // Now delete the pod
-            if err := kubecli.CoreV1().Pods(ns).Delete(context.Background(), m.PodName, metav1.DeleteOptions{}); err != nil {
-                t.Fatalf("Failed to delete pod %s: %v", m.PodName, err)
-            }
-            // Wait for pod to return with different UID
-            op := func() error {
-                pod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), m.PodName, metav1.GetOptions{})
-                if err != nil {
-                    return maskAny(err)
-                }
-                if pod.GetUID() == originalPod.GetUID() {
-                    return fmt.Errorf("Still original pod")
-                }
-                return nil
-            }
-            if err := retry.Retry(op, time.Minute*2); err != nil {
-                t.Fatalf("Pod did not restart: %v", err)
-            }
-            // Now that the Pod has been replaced, check that the PVC has NOT been replaced (if any)
-            if m.PersistentVolumeClaimName != "" {
-                pvc, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-                if err != nil {
-                    t.Fatalf("Failed to get PVC %s: %v", m.PersistentVolumeClaimName, err)
-                } else if originalPVCUID != pvc.GetUID() {
-                    t.Errorf("PVC for member %s has been replaced", m.ID)
-                }
-            }
-            // Wait for deployment to be ready
-            if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-                t.Fatalf("Deployment not running in time: %v", err)
-            }
-            // Wait for cluster to be completely ready
-            if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-                return clusterHealthEqualsSpec(h, apiObject.Spec)
-            }); err != nil {
-                t.Fatalf("Cluster not running in expected health in time: %v", err)
-            }
-        }
-        return nil
-    }, &apiObject.Status)
-
-    // Cleanup
-    removeDeployment(c, depl.GetName(), ns)
-}
-
-// TestResiliencePVCAgents
-// Tests handling of individual PVCs of agents being deleted
-func TestResiliencePVCAgents(t *testing.T) {
-    testResiliencePVC(api.ServerGroupAgents, t)
-}
-
-// TestResiliencePVCDBServers
-// Tests handling of individual PVCs of dbservers being deleted
-func TestResiliencePVCDBServers(t *testing.T) {
-    testResiliencePVC(api.ServerGroupDBServers, t)
-}
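The core resilience technique above is: record the object's UID, delete the object, then poll until it comes back under the same name with a different UID. Factored out for pods as a sketch (the PVC and service variants in the following tests are analogous):

// waitForPodReplaced deletes the pod and waits until a pod with the same
// name but a different UID exists.
func waitForPodReplaced(cli kubernetes.Interface, ns, name string, timeout time.Duration) error {
    original, err := cli.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
    if err != nil {
        return err
    }
    if err := cli.CoreV1().Pods(ns).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
        return err
    }
    return retry.Retry(func() error {
        current, err := cli.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if current.GetUID() == original.GetUID() {
            return fmt.Errorf("still the original pod")
        }
        return nil
    }, timeout)
}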
-
-// testResiliencePVC
-// Tests handling of individual PVCs of given group being deleted
-func testResiliencePVC(testGroup api.ServerGroup, t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-
-    // Prepare deployment config
-    depl := newDeployment(fmt.Sprintf("test-pvc-resilience-%s-%s", testGroup.AsRoleAbbreviated(), uniuri.NewLen(4)))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-    // Create deployment
-    apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    // Create a database client
-    ctx := context.Background()
-    client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-    // Wait for cluster to be completely ready
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, apiObject.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running in expected health in time: %v", err)
-    }
-
-    // Fetch latest status so we know all member details
-    apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{})
-    if err != nil {
-        t.Fatalf("Failed to get deployment: %v", err)
-    }
-
-    // Delete one pvc after the other
-    apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
-        if group != testGroup {
-            // We only test a specific group here
-            return nil
-        }
-        for _, m := range *status {
-            // Get current pvc so we can compare UID later
-            originalPVC, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-            if err != nil {
-                t.Fatalf("Failed to get pvc %s: %v", m.PersistentVolumeClaimName, err)
-            }
-            if err := kubecli.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), m.PersistentVolumeClaimName, metav1.DeleteOptions{}); err != nil {
-                t.Fatalf("Failed to delete pvc %s: %v", m.PersistentVolumeClaimName, err)
-            }
-            // Wait for pvc to return with different UID
-            op := func() error {
-                pvc, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-                if err != nil {
-                    if k8sutil.IsNotFound(err) && group == api.ServerGroupDBServers {
-                        // DBServer member is completely replaced when cleaned out, so the PVC will have a different name also
                        return nil
-                    }
-                    return maskAny(err)
-                }
-                if pvc.GetUID() == originalPVC.GetUID() {
-                    return fmt.Errorf("Still original pvc")
-                }
-                return nil
-            }
-            if err := retry.Retry(op, time.Minute*2); err != nil {
-                t.Fatalf("PVC did not restart: %v", err)
-            }
-            // Wait for deployment to be ready
-            if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-                t.Fatalf("Deployment not running in time: %v", err)
-            }
-            // Wait for cluster to be completely ready
-            if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-                return clusterHealthEqualsSpec(h, apiObject.Spec)
-            }); err != nil {
-                t.Fatalf("Cluster not running in expected health in time: %v", err)
-            }
-        }
-        return nil
-    }, &apiObject.Status)
-
-    // Cleanup
-    removeDeployment(c, depl.GetName(), ns)
-}
-
-// TestResiliencePVDBServer
-// Tests handling of entire PVs of dbservers being removed.
-func TestResiliencePVDBServer(t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-
-    // Prepare deployment config
-    depl := newDeployment("test-pv-prmr-resi-" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-    // Create deployment
-    apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    // Create a database client
-    ctx := context.Background()
-    client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-    // Wait for cluster to be completely ready
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, apiObject.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running in expected health in time: %v", err)
-    }
-
-    // Fetch latest status so we know all member details
-    apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{})
-    if err != nil {
-        t.Fatalf("Failed to get deployment: %v", err)
-    }
-
-    // Delete one pv, pvc & pod after the other
-    apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
-        if group != api.ServerGroupDBServers {
-            // Agents cannot be replaced with a new ID
-            // Coordinators, Sync masters/workers have no persistent storage
-            return nil
-        }
-        for i, m := range *status {
-            // Only test first 2
-            if i >= 2 {
-                continue
-            }
-            // Get current pvc so we can compare UID later
-            originalPVC, err := kubecli.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), m.PersistentVolumeClaimName, metav1.GetOptions{})
-            if err != nil {
-                t.Fatalf("Failed to get pvc %s: %v", m.PersistentVolumeClaimName, err)
-            }
-            // Get current pv
-            pvName := originalPVC.Spec.VolumeName
-            require.NotEmpty(t, pvName, "VolumeName of %s must be non-empty", originalPVC.GetName())
-            // Delete PV
-            if err := kubecli.CoreV1().PersistentVolumes().Delete(context.Background(), pvName, metav1.DeleteOptions{}); err != nil {
-                t.Fatalf("Failed to delete pv %s: %v", pvName, err)
-            }
-            // Delete PVC
-            if err := kubecli.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), m.PersistentVolumeClaimName, metav1.DeleteOptions{}); err != nil {
-                t.Fatalf("Failed to delete pvc %s: %v", m.PersistentVolumeClaimName, err)
-            }
-            // Delete Pod
-            /*if err := kubecli.CoreV1().Pods(ns).Delete(m.PodName, &metav1.DeleteOptions{}); err != nil {
-                t.Fatalf("Failed to delete pod %s: %v", m.PodName, err)
-            }*/
-            // Wait for cluster to be healthy again with the same number of
-            // dbservers, but the current dbserver being replaced.
-            expectedDBServerCount := apiObject.Spec.DBServers.GetCount()
-            unexpectedID := m.ID
-            pred := func(depl *api.ArangoDeployment) error {
-                if len(depl.Status.Members.DBServers) != expectedDBServerCount {
-                    return maskAny(fmt.Errorf("Expected %d dbservers, got %d", expectedDBServerCount, len(depl.Status.Members.DBServers)))
-                }
-                if depl.Status.Members.ContainsID(unexpectedID) {
-                    return maskAny(fmt.Errorf("Member %s should be gone", unexpectedID))
-                }
-                return nil
-            }
-            if _, err := waitUntilDeployment(c, apiObject.GetName(), ns, pred, time.Minute*5); err != nil {
-                t.Fatalf("Deployment not ready in time: %v", err)
-            }
-            // Wait for cluster to be completely ready
-            if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-                return clusterHealthEqualsSpec(h, apiObject.Spec)
-            }); err != nil {
-                t.Fatalf("Cluster not running in expected health in time: %v", err)
-            }
-        }
-        return nil
-    }, &apiObject.Status)
-
-    // Cleanup
-    removeDeployment(c, depl.GetName(), ns)
-}
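The predicate used above waits for a member to be replaced rather than merely restarted: the member count must be unchanged while the old ID has disappeared. Generalized as a standalone closure (a sketch built only from the APIs visible in these tests):

// memberReplaced returns a predicate that succeeds once the DBServer group
// is back at expectedCount and no longer contains oldID.
func memberReplaced(expectedCount int, oldID string) func(*api.ArangoDeployment) error {
    return func(depl *api.ArangoDeployment) error {
        if got := len(depl.Status.Members.DBServers); got != expectedCount {
            return fmt.Errorf("expected %d dbservers, got %d", expectedCount, got)
        }
        if depl.Status.Members.ContainsID(oldID) {
            return fmt.Errorf("member %s should be gone", oldID)
        }
        return nil
    }
}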
-
-// TestResilienceService
-// Tests handling of individual service deletions
-func TestResilienceService(t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-
-    // Prepare deployment config
-    depl := newDeployment("test-service-resilience-" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-    // Create deployment
-    apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    if _, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    // Create a database client
-    ctx := context.Background()
-    client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-    // Wait for cluster to be completely ready
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, apiObject.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running in expected health in time: %v", err)
-    }
-
-    // Fetch latest status so we know all member details
-    apiObject, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{})
-    if err != nil {
-        t.Fatalf("Failed to get deployment: %v", err)
-    }
-
-    // Delete database service
-    // Get current service so we can compare UID later
-    serviceName := apiObject.Status.ServiceName
-    originalService, err := kubecli.CoreV1().Services(ns).Get(context.Background(), serviceName, metav1.GetOptions{})
-    if err != nil {
-        t.Fatalf("Failed to get service %s: %v", serviceName, err)
-    }
-    if err := kubecli.CoreV1().Services(ns).Delete(context.Background(), serviceName, metav1.DeleteOptions{}); err != nil {
-        t.Fatalf("Failed to delete service %s: %v", serviceName, err)
-    }
-    // Wait for service to return with different UID
-    op := func() error {
-        service, err := kubecli.CoreV1().Services(ns).Get(context.Background(), serviceName, metav1.GetOptions{})
-        if err != nil {
-            return maskAny(err)
-        }
-        if service.GetUID() == originalService.GetUID() {
-            return fmt.Errorf("Still original service")
-        }
-        return nil
-    }
-    if err := retry.Retry(op, time.Minute); err != nil {
-        t.Fatalf("Service did not restart: %v", err)
-    }
-    // Wait for cluster to be completely ready
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, apiObject.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running in expected health in time: %v", err)
-    }
-
-    // Cleanup
-    removeDeployment(c, depl.GetName(), ns)
-}
diff --git a/tests/resources_test.go b/tests/resources_test.go
deleted file mode 100644
index 9af306907..000000000
--- a/tests/resources_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Lars Maier
-//
-
-package tests
-
-import (
-    "context"
-    "fmt"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "testing"
-
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/arangodb/kube-arangodb/pkg/client"
-    kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
-    "github.com/arangodb/kube-arangodb/pkg/util"
-    "github.com/dchest/uniuri"
-    "github.com/stretchr/testify/assert"
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
-)
-
-func TestResourcesChangeLimitsCluster(t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    deploymentClient := kubeArangoClient.MustNewClient()
-    ns := getNamespace(t)
-
-    size500m, _ := resource.ParseQuantity("50m")
-    size1, _ := resource.ParseQuantity("1")
-    size100Gi, _ := resource.ParseQuantity("100Gi")
-    size1Gi, _ := resource.ParseQuantity("1Gi")
-    size2Gi, _ := resource.ParseQuantity("2Gi")
-
-    // Prepare deployment config
-    depl := newDeployment("test-chng-limits-" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.DBServers.Count = util.NewInt(2)
-    depl.Spec.Coordinators.Count = util.NewInt(2)
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Create deployment
-    _, err := deploymentClient.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    defer removeDeployment(deploymentClient, depl.GetName(), ns)
-    assert.NoError(t, err, "failed to create deployment: %s", err)
-
-    testGroups := []api.ServerGroup{api.ServerGroupCoordinators, api.ServerGroupAgents, api.ServerGroupDBServers}
-
-    testCases := []v1.ResourceRequirements{
-        {
-            Limits: v1.ResourceList{
-                v1.ResourceCPU: size1,
-            },
-        },
-        {
-            Requests: v1.ResourceList{
-                v1.ResourceCPU: size500m,
-            },
-        },
-        {
-            Requests: v1.ResourceList{
-                v1.ResourceCPU:    size500m,
-                v1.ResourceMemory: size1Gi,
-            },
-        },
-        {
-            Requests: v1.ResourceList{
-                v1.ResourceCPU:    size500m,
-                v1.ResourceMemory: size2Gi,
-            },
-        },
-        {
-            Limits: v1.ResourceList{
-                v1.ResourceCPU:    size1,
-                v1.ResourceMemory: size100Gi,
-            },
-        },
-    }
-
-    for _, testgroup := range testGroups {
-        t.Run(testgroup.AsRole(), func(t *testing.T) {
-
-            _, err = waitUntilDeployment(deploymentClient, depl.GetName(), ns, deploymentIsReady())
-            assert.NoError(t, err, fmt.Sprintf("Deployment not running in time: %s", err))
-
-            for i, testCase := range testCases {
-                t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) {
-                    depl, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-                        gspec := spec.GetServerGroupSpec(testgroup)
-                        gspec.Resources = testCase
-                        spec.UpdateServerGroupSpec(testgroup, gspec)
-                    })
-                    assert.NoError(t, err, fmt.Sprintf("Failed to update deployment: %s", err))
-
-                    _, err = waitUntilDeployment(deploymentClient, depl.GetName(), ns, resourcesAsRequested(kubecli, ns))
-                    assert.NoError(t, err, fmt.Sprintf("Deployment not rotated in time: %s", err))
                })
-            }
-        })
-    }
-
-}
diff --git a/tests/rocksdb_encryption_test.go b/tests/rocksdb_encryption_test.go
deleted file mode 100644
index e3c9bf9f9..000000000
--- a/tests/rocksdb_encryption_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-    "context"
-    "crypto/rand"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "strings"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-
-    "github.com/dchest/uniuri"
-
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/arangodb/kube-arangodb/pkg/client"
-    "github.com/arangodb/kube-arangodb/pkg/util"
-    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-)
-
-// TestRocksDBEncryptionSingle tests the creation of a single server deployment
-// with RocksDB & Encryption.
-func TestRocksDBEncryptionSingle(t *testing.T) {
-    longOrSkip(t)
-    image := getEnterpriseImageOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-    secrets := kubecli.CoreV1().Secrets(ns)
-
-    // Prepull enterprise images
-    assert.NoError(t, prepullArangoImage(kubecli, image, ns))
-
-    // Prepare deployment config
-    depl := newDeployment("test-rocksdb-enc-sng-" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle)
-    depl.Spec.Image = util.NewString(image)
-    depl.Spec.StorageEngine = api.NewStorageEngine(api.StorageEngineRocksDB)
-    depl.Spec.RocksDB.Encryption.KeySecretName = util.NewString(strings.ToLower(uniuri.New()))
-
-    // Create encryption key secret
-    key := make([]byte, 32)
-    rand.Read(key)
-    if err := k8sutil.CreateEncryptionKeySecret(secrets, depl.Spec.RocksDB.Encryption.GetKeySecretName(), key); err != nil {
-        t.Fatalf("Create encryption key secret failed: %v", err)
-    }
-
-    // Create deployment
-    _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-    if err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    // Create database client
-    ctx := context.Background()
-    client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-    // Wait for single server available
-    if err := waitUntilVersionUp(client, nil); err != nil {
-        t.Fatalf("Single server not running returning version in time: %v", err)
-    }
-
-    // Cleanup
-    removeDeployment(c, depl.GetName(), ns)
-    removeSecret(kubecli, depl.Spec.RocksDB.Encryption.GetKeySecretName(), ns)
-}
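k8sutil.CreateEncryptionKeySecret wraps a plain Secret create; RocksDB encryption needs exactly 32 random bytes. A sketch of the equivalent direct call (the "key" data field name is an assumption for illustration; the operator util defines the real one):

// createEncryptionKeySecret stores a fresh random 256-bit key in a Secret.
func createEncryptionKeySecret(cli kubernetes.Interface, ns, name string) error {
    key := make([]byte, 32)
    if _, err := rand.Read(key); err != nil { // crypto/rand; check the error instead of discarding it
        return err
    }
    secret := &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{Name: name},
        Data: map[string][]byte{
            "key": key, // field name is an assumption
        },
    }
    _, err := cli.CoreV1().Secrets(ns).Create(context.Background(), secret, metav1.CreateOptions{})
    return err
}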
diff --git a/tests/scale_test.go b/tests/scale_test.go
deleted file mode 100644
index 1116edbda..000000000
--- a/tests/scale_test.go
+++ /dev/null
@@ -1,283 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-    "context"
-    "testing"
-
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-    "github.com/dchest/uniuri"
-
-    driver "github.com/arangodb/go-driver"
-    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/arangodb/kube-arangodb/pkg/client"
-    "github.com/arangodb/kube-arangodb/pkg/util"
-)
-
-// TestScaleClusterNonTLS tests scaling up/down the number of DBServers & coordinators
-// of a cluster without TLS.
-func TestScaleClusterNonTLS(t *testing.T) {
-    longOrSkip(t)
-    c := client.MustNewClient()
-    kubecli := mustNewKubeClient(t)
-    ns := getNamespace(t)
-
-    // Prepare deployment config
-    depl := newDeployment("test-scale-non-tls" + uniuri.NewLen(4))
-    depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-    depl.Spec.TLS = api.TLSSpec{CASecretName: util.NewString("None")}
-    depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-    // Create deployment
-    apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-    if err != nil {
-        t.Fatalf("Create deployment failed: %v", err)
-    }
-    defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-    // Wait for deployment to be ready
-    if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-        t.Fatalf("Deployment not running in time: %v", err)
-    }
-
-    // Create a database client
-    ctx := context.Background()
-    client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-    // Wait for cluster to be completely ready
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, apiObject.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running in expected health in time: %v", err)
-    }
-
-    // Add 2 DBServers, 1 coordinator
-    updated, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-        spec.DBServers.Count = util.NewInt(5)
-        spec.Coordinators.Count = util.NewInt(4)
-    })
-    if err != nil {
-        t.Fatalf("Failed to update deployment: %v", err)
-    }
-
-    // Wait for cluster to reach new size
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, updated.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running, after scale-up, in expected health in time: %v", err)
-    }
-
-    // Remove 2 DBServers, 2 coordinators
-    updated, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-        spec.DBServers.Count = util.NewInt(3)
-        spec.Coordinators.Count = util.NewInt(2)
-    })
-    if err != nil {
-        t.Fatalf("Failed to update deployment: %v", err)
-    }
-
-    // Wait for cluster to reach new size
-    if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-        return clusterHealthEqualsSpec(h, updated.Spec)
-    }); err != nil {
-        t.Fatalf("Cluster not running, after scale-down, in expected health in time: %v", err)
-    }
-
-    // Cleanup
-    removeDeployment(c, depl.GetName(), ns)
-}
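Scaling in these tests is nothing more than bumping the count fields and letting the operator reconcile; the update itself, factored out as a sketch (it reuses the removed updateDeployment helper, and the clientset interface type is illustrative):

// scaleCluster sets new DBServer/Coordinator counts on the deployment.
func scaleCluster(c versioned.Interface, name, ns string, dbservers, coordinators int) error {
    _, err := updateDeployment(c, name, ns, func(spec *api.DeploymentSpec) {
        spec.DBServers.Count = util.NewInt(dbservers)
        spec.Coordinators.Count = util.NewInt(coordinators)
    })
    return err
}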
-// TestScaleCluster tests scaling up/down the number of DBServers & coordinators
-// of a TLS-enabled cluster.
-func TestScaleCluster(t *testing.T) {
-	longOrSkip(t)
-	c := client.MustNewClient()
-	kubecli := mustNewKubeClient(t)
-	ns := getNamespace(t)
-
-	// Prepare deployment config
-	depl := newDeployment("test-scale" + uniuri.NewLen(4))
-	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-	depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert
-	depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-	// Create deployment
-	apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-	if err != nil {
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-	// Wait for deployment to be ready
-	if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()); err != nil {
-		t.Fatalf("Deployment not running in time: %v", err)
-	}
-
-	// Create a database client
-	ctx := context.Background()
-	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-	// Wait for cluster to be completely ready
-	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-		return clusterHealthEqualsSpec(h, apiObject.Spec)
-	}); err != nil {
-		t.Fatalf("Cluster not running in expected health in time: %v", err)
-	}
-
-	// Add 2 DBServers, 1 coordinator
-	updated, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-		spec.DBServers.Count = util.NewInt(5)
-		spec.Coordinators.Count = util.NewInt(4)
-	})
-	if err != nil {
-		t.Fatalf("Failed to update deployment: %v", err)
-	}
-
-	// Wait for cluster to reach new size
-	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-		return clusterHealthEqualsSpec(h, updated.Spec)
-	}); err != nil {
-		t.Fatalf("Cluster not running, after scale-up, in expected health in time: %v", err)
-	}
-
-	// Remove 2 DBServers, 2 coordinators
-	updated, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-		spec.DBServers.Count = util.NewInt(3)
-		spec.Coordinators.Count = util.NewInt(2)
-	})
-	if err != nil {
-		t.Fatalf("Failed to update deployment: %v", err)
-	}
-
-	// Wait for cluster to reach new size
-	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-		return clusterHealthEqualsSpec(h, updated.Spec)
-	}); err != nil {
-		t.Fatalf("Cluster not running, after scale-down, in expected health in time: %v", err)
-	}
-
-	// Cleanup
-	removeDeployment(c, depl.GetName(), ns)
-}
-
-// TestScaleClusterWithSync tests scaling a cluster deployment with sync enabled.
-func TestScaleClusterWithSync(t *testing.T) {
-	longOrSkip(t)
-	img := getEnterpriseImageOrSkip(t)
-	c := client.MustNewClient()
-	kubecli := mustNewKubeClient(t)
-	ns := getNamespace(t)
-
-	// Prepare deployment config
-	depl := newDeployment("test-scale-sync" + uniuri.NewLen(4))
-	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)
-	depl.Spec.Image = util.NewString(img)
-	depl.Spec.Sync.Enabled = util.NewBool(true)
-	depl.Spec.SetDefaults(depl.GetName()) // this must be last
-
-	// Create deployment
-	_, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-	if err != nil {
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	// Prepare cleanup
-	defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-	// Wait for deployment to be ready
-	apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-	if err != nil {
-		t.Fatalf("Deployment not running in time: %v", err)
-	}
-
-	// Create a database client
-	ctx := context.Background()
-	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-	// Create a syncmaster client
-	syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t)
-
-	// Wait for cluster to be completely ready
-	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-		return clusterHealthEqualsSpec(h, apiObject.Spec)
-	}); err != nil {
-		t.Fatalf("Cluster not running in expected health in time: %v", err)
-	}
-
-	// Wait for syncmasters to be available
-	if err := waitUntilSyncVersionUp(syncClient); err != nil {
-		t.Fatalf("SyncMasters not running returning version in time: %v", err)
-	}
-
-	// Add 1 DBServer, 2 SyncMasters, 1 SyncWorker
-	updated, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-		spec.DBServers.Count = util.NewInt(4)
-		spec.SyncMasters.Count = util.NewInt(5)
-		spec.SyncWorkers.Count = util.NewInt(4)
-	})
-	if err != nil {
-		t.Fatalf("Failed to update deployment: %v", err)
-	}
-
-	// Wait for cluster to reach new size
-	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-		return clusterHealthEqualsSpec(h, updated.Spec)
-	}); err != nil {
-		t.Fatalf("Cluster not running, after scale-up, in expected health in time: %v", err)
-	}
-	// Check number of syncmasters
-	if err := waitUntilSyncMasterCountReached(syncClient, updated.Spec.SyncMasters.GetCount()); err != nil {
-		t.Fatalf("Unexpected #syncmasters, after scale-up: %v", err)
-	}
-	// Check number of syncworkers
-	if err := waitUntilSyncWorkerCountReached(syncClient, updated.Spec.SyncWorkers.GetCount()); err != nil {
-		t.Fatalf("Unexpected #syncworkers, after scale-up: %v", err)
-	}
-
-	// Remove 1 DBServer, 2 SyncMasters & 1 SyncWorker
-	updated, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
-		spec.DBServers.Count = util.NewInt(3)
-		spec.SyncMasters.Count = util.NewInt(3)
-		spec.SyncWorkers.Count = util.NewInt(3)
-	})
-	if err != nil {
-		t.Fatalf("Failed to update deployment: %v", err)
-	}
-
-	// Wait for cluster to reach new size
-	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
-		return clusterHealthEqualsSpec(h, updated.Spec)
-	}); err != nil {
-		t.Fatalf("Cluster not running, after scale-down, in expected health in time: %v", err)
-	}
-	// Check number of syncmasters
-	if err := waitUntilSyncMasterCountReached(syncClient, updated.Spec.SyncMasters.GetCount()); err != nil {
-		t.Fatalf("Unexpected #syncmasters, after scale-down: %v", err)
-	}
-	// Check number of syncworkers
-	if err := waitUntilSyncWorkerCountReached(syncClient, updated.Spec.SyncWorkers.GetCount()); err != nil {
-		t.Fatalf("Unexpected #syncworkers, after scale-down: %v", err)
-	}
-
-	// Cleanup
-	removeDeployment(c, depl.GetName(), ns)
-}
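The waitUntilClusterHealth/clusterHealthEqualsSpec pair used throughout these scale tests is, at its core, a poll over the go-driver cluster health API. The following is a sketch of what such a health predicate might look like; the role-counting logic is an assumption for illustration, not the helper's actual implementation.

```go
package sketch

import (
	"fmt"

	driver "github.com/arangodb/go-driver"
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

// healthMatchesSpec checks that the number of DBServers and coordinators
// reported in the cluster health matches the deployment spec.
func healthMatchesSpec(h driver.ClusterHealth, spec api.DeploymentSpec) error {
	dbservers, coordinators := 0, 0
	for _, sh := range h.Health {
		switch sh.Role {
		case driver.ServerRoleDBServer:
			dbservers++
		case driver.ServerRoleCoordinator:
			coordinators++
		}
	}
	if dbservers != spec.DBServers.GetCount() || coordinators != spec.Coordinators.GetCount() {
		return fmt.Errorf("expected %d dbservers / %d coordinators, got %d / %d",
			spec.DBServers.GetCount(), spec.Coordinators.GetCount(), dbservers, coordinators)
	}
	return nil
}
```

A caller would wrap this predicate in a retry loop, exactly as the tests do, since members join and leave the health map asynchronously while the operator reconciles.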
diff --git a/tests/secret_hashes_test.go b/tests/secret_hashes_test.go
deleted file mode 100644
index e05ca31bc..000000000
--- a/tests/secret_hashes_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author tomasz@arangodb.com
-//
-
-package tests
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/arangodb/go-driver"
-
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/util/arangod"
-	"github.com/arangodb/kube-arangodb/pkg/util/constants"
-	"github.com/arangodb/kube-arangodb/pkg/util/retry"
-	"github.com/dchest/uniuri"
-	"github.com/pkg/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// TestSecretHashesRootUser checks that Status.SecretHashes.Users[root] changes
-// after the root password secret is updated
-func TestSecretHashesRootUser(t *testing.T) {
-	longOrSkip(t)
-	c := client.MustNewClient()
-	kubecli := mustNewKubeClient(t)
-	ns := getNamespace(t)
-
-	// Prepare deployment config
-	depl := newDeployment("test-auth-sng-def-" + uniuri.NewLen(4))
-	depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle)
-	depl.Spec.SetDefaults(depl.GetName())
-	depl.Spec.Bootstrap.PasswordSecretNames[api.UserNameRoot] = api.PasswordSecretNameAuto
-
-	// Create deployment
-	apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
-	if err != nil {
-		t.Fatalf("Create deployment failed: %v", err)
-	}
-	defer deferedCleanupDeployment(c, depl.GetName(), ns)
-
-	// Wait for deployment to be ready
-	depl, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
-	if err != nil {
-		t.Fatalf("Deployment not running in time: %v", err)
-	}
-
-	// Create a database client
-	ctx := arangod.WithRequireAuthentication(context.Background())
-	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil)
-
-	// Wait for single server available
-	if err := waitUntilVersionUp(client, nil); err != nil {
-		t.Fatalf("Single server not running returning version in time: %v", err)
-	}
-
-	depl, err = waitUntilDeployment(c, depl.GetName(), ns, func(obj *api.ArangoDeployment) error {
-		// check if root secret password is set
-		secretHashes := obj.Status.SecretHashes
-		if secretHashes == nil {
-			return fmt.Errorf("field Status.SecretHashes is not set")
-		}
-
-		if secretHashes.Users == nil {
-			return fmt.Errorf("field Status.SecretHashes.Users is not set")
-		}
-
-		if hash, ok := secretHashes.Users[api.UserNameRoot]; !ok {
-			return fmt.Errorf("field Status.SecretHashes.Users[root] is not set")
-		} else if len(hash) == 0 {
-			return fmt.Errorf("field Status.SecretHashes.Users[root] is empty")
-		}
-
-		return nil
-	})
-
-	if err != nil {
-		t.Fatalf("Deployment is not set properly: %v", err)
-	}
-	rootHashSecret := depl.Status.SecretHashes.Users[api.UserNameRoot]
-
-	secretRootName := string(depl.Spec.Bootstrap.PasswordSecretNames[api.UserNameRoot])
-	secretRoot, err := waitUntilSecret(kubecli, secretRootName, ns, time.Second)
-	if err != nil {
-		t.Fatalf("Root secret '%s' not found: %v", secretRootName, err)
-	}
-
-	secretRoot.Data[constants.SecretPassword] = []byte("1")
-	_, err = kubecli.CoreV1().Secrets(ns).Update(context.Background(), secretRoot, metav1.UpdateOptions{})
-	if err != nil {
-		t.Fatalf("Root secret '%s' has not been changed: %v", secretRootName, err)
-	}
-
-	err = retry.Retry(func() error {
-		// check if root secret hash has changed
-		depl, err = c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{})
-		if err != nil {
-			t.Fatalf("Failed to get deployment: %v", err)
-		}
-
-		if rootHashSecret == depl.Status.SecretHashes.Users[api.UserNameRoot] {
-			return maskAny(errors.New("field Status.SecretHashes.Users[root] has not been changed yet"))
-		}
-		return nil
-	}, deploymentReadyTimeout)
-	if err != nil {
-		t.Fatalf("%v", err)
-	}
-
-	// Check if password changed
-	auth := driver.BasicAuthentication(api.UserNameRoot, "1")
-	_, err = client.Connection().SetAuthentication(auth)
-	if err != nil {
-		t.Fatalf("The password for user '%s' has not been changed: %v", api.UserNameRoot, err)
-	}
-	_, err = client.Version(context.Background())
-	if err != nil {
-		t.Fatalf("cannot get version after the password has been changed")
-	}
-
-	// Cleanup
-	removeDeployment(c, depl.GetName(), ns)
-}
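The rotation pattern the test above exercises, write a new password into the bootstrap Secret and then prove the server accepts it, can be reduced to the following hedged sketch. The secret data key "password" mirrors what the test accesses via constants.SecretPassword; the function itself is illustrative, and a real caller would still poll (as the test does with retry.Retry) because the operator applies the change asynchronously.

```go
package sketch

import (
	"context"

	driver "github.com/arangodb/go-driver"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// rotatePassword writes a new password into the bootstrap secret and then
// verifies it by re-authenticating the existing driver connection.
func rotatePassword(cli kubernetes.Interface, dbc driver.Client, ns, secretName, user, newPassword string) error {
	secret, err := cli.CoreV1().Secrets(ns).Get(context.Background(), secretName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	secret.Data["password"] = []byte(newPassword) // key name assumed, per constants.SecretPassword
	if _, err := cli.CoreV1().Secrets(ns).Update(context.Background(), secret, metav1.UpdateOptions{}); err != nil {
		return err
	}
	// Once the operator has picked up the change, the new credentials must work.
	_, err = dbc.Connection().SetAuthentication(driver.BasicAuthentication(user, newPassword))
	return err
}
```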
diff --git a/tests/service_account_test.go b/tests/service_account_test.go
deleted file mode 100644
index d5da4808c..000000000
--- a/tests/service_account_test.go
+++ /dev/null
@@ -1,292 +0,0 @@
-//
-// DISCLAIMER
-//
-// Copyright 2020 ArangoDB GmbH, Cologne, Germany
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Copyright holder is ArangoDB GmbH, Cologne, Germany
-//
-// Author Ewout Prangsma
-//
-
-package tests
-
-import (
-	"context"
-	"strings"
-	"testing"
-
-	"github.com/dchest/uniuri"
-	"github.com/stretchr/testify/assert"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-
-	driver "github.com/arangodb/go-driver"
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/arangodb/kube-arangodb/pkg/client"
-	"github.com/arangodb/kube-arangodb/pkg/util"
-)
-
-// TestServiceAccountSingle tests the creation of a single server deployment
-// with default settings using a custom service account.
-func TestServiceAccountSingle(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare service account - namePrefix := "test-sa-sng-" - saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) - defer deleteServiceAccount(kubecli, saName, ns) - - // Prepare deployment config - depl := newDeployment(namePrefix + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl.Spec.Single.ServiceAccountName = util.NewString(saName) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Check service account name - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Single, saName, t) - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingle)) -} - -// TestServiceAccountActiveFailover tests the creating of a ActiveFailover server deployment -// with default settings using a custom service account. -func TestServiceAccountActiveFailover(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare service account - namePrefix := "test-sa-rs-" - saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) - defer deleteServiceAccount(kubecli, saName, ns) - - // Prepare deployment config - depl := newDeployment(namePrefix + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeActiveFailover) - depl.Spec.Single.ServiceAccountName = util.NewString(saName) - depl.Spec.Agents.ServiceAccountName = util.NewString(saName) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("ActiveFailover servers not running returning version in time: %v", err) - } - - // Check service account name - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Single, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Agents, saName, t) - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingleActive)) -} - -// TestServiceAccountCluster tests the creating of a cluster deployment -// with default settings using a custom 
service account. -func TestServiceAccountCluster(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare service account - namePrefix := "test-sa-cls-" - saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) - defer deleteServiceAccount(kubecli, saName, ns) - - // Prepare deployment config - depl := newDeployment(namePrefix + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Agents.ServiceAccountName = util.NewString(saName) - depl.Spec.DBServers.ServiceAccountName = util.NewString(saName) - depl.Spec.Coordinators.ServiceAccountName = util.NewString(saName) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster not running returning version in time: %v", err) - } - - // Check service account name - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Agents, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Coordinators, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.DBServers, saName, t) - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleCoordinator)) -} - -// TestServiceAccountClusterWithSync tests the creating of a cluster deployment -// with default settings and sync enabled using a custom service account. 
-func TestServiceAccountClusterWithSync(t *testing.T) { - longOrSkip(t) - img := getEnterpriseImageOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare service account - namePrefix := "test-sa-cls-sync-" - saName := mustCreateServiceAccount(kubecli, namePrefix, ns, t) - defer deleteServiceAccount(kubecli, saName, ns) - - // Prepare deployment config - depl := newDeployment(namePrefix + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Image = util.NewString(img) - depl.Spec.Sync.Enabled = util.NewBool(true) - depl.Spec.Agents.ServiceAccountName = util.NewString(saName) - depl.Spec.DBServers.ServiceAccountName = util.NewString(saName) - depl.Spec.Coordinators.ServiceAccountName = util.NewString(saName) - depl.Spec.SyncMasters.ServiceAccountName = util.NewString(saName) - depl.Spec.SyncWorkers.ServiceAccountName = util.NewString(saName) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster not running returning version in time: %v", err) - } - - // Create a syncmaster client - syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t) - - // Wait for syncmasters to be available - if err := waitUntilSyncVersionUp(syncClient); err != nil { - t.Fatalf("SyncMasters not running returning version in time: %v", err) - } - - // Check service account name - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Agents, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.Coordinators, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.DBServers, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.SyncMasters, saName, t) - checkMembersUsingServiceAccount(kubecli, ns, apiObject.Status.Members.SyncWorkers, saName, t) - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleCoordinator)) -} - -// mustCreateServiceAccount creates an empty service account with random name and returns -// its name. On error, the test is failed. -func mustCreateServiceAccount(kubecli kubernetes.Interface, namePrefix, ns string, t *testing.T) string { - s := v1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: strings.ToLower(namePrefix + uniuri.NewLen(4)), - }, - } - if _, err := kubecli.CoreV1().ServiceAccounts(ns).Create(context.Background(), &s, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create service account: %v", err) - } - return s.GetName() -} - -// deleteServiceAccount deletes a service account with given name in given namespace. 
-func deleteServiceAccount(kubecli kubernetes.Interface, name, ns string) error { - if err := kubecli.CoreV1().ServiceAccounts(ns).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil { - return maskAny(err) - } - return nil -} - -// checkMembersUsingServiceAccount checks the serviceAccountName of the pods of all members -// to ensure that is equal to the given serviceAccountName. -func checkMembersUsingServiceAccount(kubecli kubernetes.Interface, ns string, members []api.MemberStatus, serviceAccountName string, t *testing.T) { - pods := kubecli.CoreV1().Pods(ns) - for _, m := range members { - if p, err := pods.Get(context.Background(), m.PodName, metav1.GetOptions{}); err != nil { - t.Errorf("Failed to get pod for member '%s': %v", m.ID, err) - } else if p.Spec.ServiceAccountName != serviceAccountName { - t.Errorf("Expected pod '%s' to have serviceAccountName '%s', got '%s'", p.GetName(), serviceAccountName, p.Spec.ServiceAccountName) - } - } -} diff --git a/tests/sidecar_test.go b/tests/sidecar_test.go deleted file mode 100644 index 60204083a..000000000 --- a/tests/sidecar_test.go +++ /dev/null @@ -1,427 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
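The service-account helpers above verify pods by walking Status.Members and fetching each member pod by name. An alternative is to select the deployment's pods by label in one List call; the label key below is an assumption about how the operator labels its pods, so treat this purely as a sketch.

```go
package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podsUseServiceAccount lists the pods of one deployment and verifies their
// serviceAccountName. The "arango_deployment" label key is an assumption.
func podsUseServiceAccount(cli kubernetes.Interface, ns, deplName, saName string) error {
	pods, err := cli.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{
		LabelSelector: "arango_deployment=" + deplName,
	})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		if p.Spec.ServiceAccountName != saName {
			return fmt.Errorf("pod %s uses serviceAccountName %q, want %q",
				p.GetName(), p.Spec.ServiceAccountName, saName)
		}
	}
	return nil
}
```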
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Kaveh Vahedipour -// -package tests - -import ( - "context" - "fmt" - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - driver "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/dchest/uniuri" - v1 "k8s.io/api/core/v1" -) - -type sideCarTest struct { - shortTest bool - name string - mode api.DeploymentMode - version string - image string - imageTag string - sideCars map[string][]v1.Container -} - -type SideCarTest interface { - IsShortTest() bool - Mode() api.DeploymentMode - Name() string - Image() string - Version() driver.Version - GroupSideCars(string) []v1.Container - AddSideCar(string, v1.Container) - ClearGroupSideCars(group string) -} - -func (s *sideCarTest) IsShortTest() bool { - return s.shortTest -} -func (s *sideCarTest) Name() string { - return s.name -} -func (s *sideCarTest) Mode() api.DeploymentMode { - return s.mode -} -func (s *sideCarTest) Version() driver.Version { - return driver.Version(s.version) -} -func (s *sideCarTest) GroupSideCars(group string) []v1.Container { - if s.sideCars == nil { - s.sideCars = make(map[string][]v1.Container) - } - return s.sideCars[group] -} - -func (s *sideCarTest) AddSideCar(group string, container v1.Container) { - if s.sideCars == nil { - s.sideCars = make(map[string][]v1.Container) - } - s.sideCars[group] = append(s.sideCars[group], container) -} - -func (s *sideCarTest) Image() string { - imageName := "arangodb/arangodb" - if s.image != "" { - imageName = s.image - } - imageTag := "latest" - if s.imageTag != "" { - imageTag = s.imageTag - } - return fmt.Sprintf("%s:%s", imageName, imageTag) -} -func (s *sideCarTest) ClearGroupSideCars(group string) { - s.sideCars[group] = nil -} - -// TestSideCars tests side car functionality -func TestSideCars(t *testing.T) { - runSideCarTest(t, &sideCarTest{ - version: "3.4.7", - imageTag: "3.4.7", - name: "sidecar-tests", - }) -} - -func runSideCarTest(t *testing.T, spec SideCarTest) { - - if !spec.IsShortTest() { - longOrSkip(t) - } - - ns := getNamespace(t) - kubecli := mustNewKubeClient(t) - c := kubeArangoClient.MustNewClient() - - depl := newDeployment(fmt.Sprintf("tu-%s-%s", spec.Name(), uniuri.NewLen(4))) - depl.Spec.Mode = api.NewMode(spec.Mode()) - depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert - depl.Spec.Image = util.NewString(spec.Image()) - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - deployment, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - deployment, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - DBClient := mustNewArangodDatabaseClient(ctx, kubecli, deployment, t, nil) - if err := waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, spec.Version()); err != nil { - t.Fatalf("Deployment not healthy in time: %v", err) - } - - // Add sidecar to coordinators - var coordinators = api.ServerGroupCoordinators.AsRole() - var dbservers = api.ServerGroupDBServers.AsRole() - var 
agents = api.ServerGroupAgents.AsRole() - - var name = "nginx" - var image = "nginx:1.7.9" - - t.Run("Add first sidecar to coordinators", func(t *testing.T) { - spec.AddSideCar(coordinators, v1.Container{Image: image, Name: name}) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to add %s to group %s", name, coordinators) - } else { - t.Logf("Add %s sidecar to group %s ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - cmd1 := []string{"sh", "-c", "sleep 3600"} - cmd2 := []string{"sh", "-c", "sleep 1800"} - cmd := []string{"sh"} - args := []string{"-c", "sleep 3600"} - - // Add 2nd sidecar to coordinators - image = "busybox" - name = "sleeper" - t.Run("Add 2nd sidecar to coordinators", func(t *testing.T) { - spec.AddSideCar(coordinators, v1.Container{Image: image, Name: name, Command: cmd1}) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to add %s to group %s", name, coordinators) - } else { - t.Logf("Add sidecar %s to group %s ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Update command line of second sidecar - t.Run("Update command line of second sidecar", func(t *testing.T) { - spec.GroupSideCars(coordinators)[1].Command = cmd2 - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to update %s in group %s with new command line", name, coordinators) - } else { - t.Logf("Update %s in group %s with new command line ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Change command line args of second sidecar - t.Run("Update command line args of second sidecar", func(t *testing.T) { - spec.GroupSideCars(coordinators)[1].Command = cmd - spec.GroupSideCars(coordinators)[1].Args = args - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to update %s in group %s with new command line arguments", name, coordinators) - } else { - t.Logf("Update %s in group %s with new command line arguments ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... 
done") - } - }) - - // Change environment variables of second container - t.Run("Change environment variables of second sidecar", func(t *testing.T) { - spec.GroupSideCars(coordinators)[1].Env = []v1.EnvVar{ - {Name: "Hello", Value: "World"}, {Name: "Pi", Value: "3.14159265359"}, {Name: "Two", Value: "2"}} - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to change environment variables of %s sidecars for %s", name, coordinators) - } else { - t.Logf("Change environment variables of %s sidecars for %s ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Upgrade side car image - name = spec.GroupSideCars(coordinators)[0].Name - t.Run("Upgrade side car image", func(t *testing.T) { - spec.GroupSideCars(coordinators)[0].Image = "nginx:1.7.10" - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to update %s in group %s with new image", name, coordinators) - } else { - t.Logf("Update image of sidecar %s in group %s ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Update side car image with new pull policy - t.Run("Update side car image with new pull policy", func(t *testing.T) { - spec.GroupSideCars(coordinators)[0].ImagePullPolicy = v1.PullPolicy("Always") - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to update %s in group %s with new image pull policy", name, coordinators) - } else { - t.Logf("Update %s in group %s with new image pull policy ...", name, coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Remove all sidecars again - t.Run("Remove all sidecars again", func(t *testing.T) { - spec.ClearGroupSideCars(coordinators) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to remove all sidecars from group %s", coordinators) - } else { - t.Logf("Remove all sidecars from group %s ...", coordinators) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... 
done") - } - }) - - // Adding containers to coordinators and db servers - image = "busybox" - name = "sleeper" - t.Run("Add containers to coordinators and dbservers", func(t *testing.T) { - spec.AddSideCar(coordinators, v1.Container{Image: image, Name: name, Command: cmd1}) - spec.AddSideCar(dbservers, v1.Container{Image: image, Name: name, Command: cmd1}) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - depl.DBServers.Sidecars = spec.GroupSideCars(dbservers) - }) - if err != nil { - t.Fatalf("Failed to add a container to both coordinators and db servers") - } else { - t.Logf("Add %s sidecar to %s and %s ...", name, coordinators, dbservers) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Check that no pod rotation happens for 2 mins, this is to check that - // no unnecessary rotations happen and to guard against a regression. - t.Run("Check no pod rotation", func(t *testing.T) { - d, err := waitUntilDeployment(c, depl.GetName(), ns, resourcesAsRequested(kubecli, ns)) - if err != nil { - t.Fatalf("Deployment not rotated in time: %s", err) - } - podCreationTimes := getPodCreationTimes(t, kubecli, d) - time.Sleep(2 * time.Minute) - checkPodCreationTimes(t, kubecli, d, podCreationTimes) - }) - - // Clear containers from both groups - t.Run("Clear containers from both groups", func(t *testing.T) { - spec.ClearGroupSideCars(coordinators) - spec.ClearGroupSideCars(dbservers) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - depl.DBServers.Sidecars = spec.GroupSideCars(dbservers) - }) - if err != nil { - t.Fatalf("Failed to delete all containers from both coordinators and db servers") - } else { - t.Logf("Remove all sidecars ...") - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Adding containers to coordinators again - t.Run("Add containers to coordinators again", func(t *testing.T) { - spec.AddSideCar(agents, v1.Container{Image: image, Name: name, Command: cmd1}) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - }) - if err != nil { - t.Fatalf("Failed to add a %s sidecar to %s", name, agents) - } else { - t.Logf("Add a %s sidecar to %s ...", name, agents) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... 
done") - } - }) - - // Clear containers from coordinators and add to db servers - t.Run("Clear containers from coordinators and add to dbservers", func(t *testing.T) { - spec.ClearGroupSideCars(agents) - spec.AddSideCar(dbservers, v1.Container{Image: image, Name: name, Command: cmd1}) - deployment, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators) - depl.DBServers.Sidecars = spec.GroupSideCars(dbservers) - }) - if err != nil { - t.Fatalf("Failed to delete %s containers and add %s sidecars to %s", agents, name, dbservers) - } else { - t.Logf("Delete %s containers and add %s sidecars to %s", agents, name, dbservers) - } - err = waitUntilClusterSidecarsEqualSpec(t, *depl) - if err != nil { - t.Fatalf("... failed: %v", err) - } else { - t.Log("... done") - } - }) - - // Clean up - removeDeployment(c, depl.GetName(), ns) - -} diff --git a/tests/simple_test.go b/tests/simple_test.go deleted file mode 100644 index ec0d1a7fc..000000000 --- a/tests/simple_test.go +++ /dev/null @@ -1,201 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package tests - -import ( - "context" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/dchest/uniuri" - "github.com/stretchr/testify/assert" - - driver "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util" -) - -// TestSimpleSingle tests the creating of a single server deployment -// with default settings. 
-func TestSimpleSingle(t *testing.T) { - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-sng-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Single server not running returning version in time: %v", err) - } - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingle)) -} - -// TestSimpleActiveFailover tests the creating of a ActiveFailover server deployment -// with default settings. -func TestSimpleActiveFailover(t *testing.T) { - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-rs-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeActiveFailover) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for single server available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("ActiveFailover servers not running returning version in time: %v", err) - } - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingleActive)) -} - -// TestSimpleCluster tests the creating of a cluster deployment -// with default settings. 
-func TestSimpleCluster(t *testing.T) { - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-cls-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster not running returning version in time: %v", err) - } - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleCoordinator)) -} - -// TestSimpleClusterWithSync tests the creating of a cluster deployment -// with default settings and sync enabled. -func TestSimpleClusterWithSync(t *testing.T) { - img := getEnterpriseImageOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-cls-sync-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Image = util.NewString(img) - depl.Spec.Sync.Enabled = util.NewBool(true) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer removeDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be available - if err := waitUntilVersionUp(client, nil); err != nil { - t.Fatalf("Cluster not running returning version in time: %v", err) - } - - // Create a syncmaster client - syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t) - - // Wait for syncmasters to be available - if err := waitUntilSyncVersionUp(syncClient); err != nil { - t.Fatalf("SyncMasters not running returning version in time: %v", err) - } - - // Check server role - assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleCoordinator)) -} diff --git a/tests/sync/Dockerfile b/tests/sync/Dockerfile deleted file mode 100644 index 045beda19..000000000 --- a/tests/sync/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM scratch - -ADD bin/arangodb_operator_sync_test_ctrl /usr/bin/ - -ENTRYPOINT [ "/usr/bin/arangodb_operator_sync_test_ctrl" ] \ No newline at end of file diff --git a/tests/sync/main.go b/tests/sync/main.go deleted file mode 100644 index e6275fafa..000000000 --- a/tests/sync/main.go +++ /dev/null @@ -1,427 +0,0 @@ -package main - -import ( - "context" - "crypto/tls" - "flag" - "fmt" - "io" - "log" - "net" - "os" - "strconv" - "time" - - "github.com/arangodb/kube-arangodb/pkg/apis/replication" - - 
"github.com/arangodb/kube-arangodb/pkg/apis/deployment" - - sync "github.com/arangodb/arangosync-client/client" - "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/arangodb/kube-arangodb/pkg/util/retry" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - - dapi "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - rapi "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - "github.com/arangodb/kube-arangodb/pkg/util" -) - -var ( - arangoImage string - arangoSyncTestImage string - arangoSyncImage string - licenseKeySecretName string - namespace string - additionalTestArgs string -) - -const ( - accessPackageSecretName = "dst-access-package" - dstDeploymentName = "dc-dst" - srcDeploymentName = "dc-src" - replicationResourceName = "dc-dst-src-replication" - arangosyncTestPodName = "kube-arango-sync-tests" -) - -func init() { - flag.StringVar(&arangoImage, "arango-image", "arangodb/enterprise:latest", "ArangoDB Enterprise image used for test") - flag.StringVar(&arangoSyncTestImage, "arango-sync-test-image", "", "ArangoSync test image") - flag.StringVar(&arangoSyncImage, "arango-sync-image", "", "ArangoSync Image used for testing") - flag.StringVar(&licenseKeySecretName, "license-key-secret-name", "arangodb-license-key", "Secret name of the license key used for the deployments") - flag.StringVar(&namespace, "namespace", "default", "Testing namespace") - flag.StringVar(&additionalTestArgs, "test-args", "", "Additional parameters passed to the test executable") -} - -func newDeployment(ns, name string) *dapi.ArangoDeployment { - return &dapi.ArangoDeployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: dapi.SchemeGroupVersion.String(), - Kind: deployment.ArangoDeploymentResourceKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - // OwnerReferences: []metav1.OwnerReference{ - // metav1.OwnerReference{ - // }, - // }, - }, - Spec: dapi.DeploymentSpec{ - Image: util.NewString(arangoImage), - License: dapi.LicenseSpec{ - SecretName: util.NewString(licenseKeySecretName), - }, - }, - } -} - -func newSyncDeployment(ns, name string, accessPackage bool) *dapi.ArangoDeployment { - d := newDeployment(ns, name) - d.Spec.Sync = dapi.SyncSpec{ - Enabled: util.NewBool(true), - ExternalAccess: dapi.SyncExternalAccessSpec{ - ExternalAccessSpec: dapi.ExternalAccessSpec{ - Type: dapi.NewExternalAccessType(dapi.ExternalAccessTypeNone), - }, - }, - } - - d.Spec.SyncMasters.Args = append(d.Spec.SyncMasters.Args, "--log.level=debug") - d.Spec.SyncWorkers.Args = append(d.Spec.SyncWorkers.Args, "--log.level=debug") - - if accessPackage { - d.Spec.Sync.ExternalAccess.AccessPackageSecretNames = []string{accessPackageSecretName} - } - - if arangoSyncImage != "" { - d.Spec.Sync.Image = util.NewString(arangoSyncImage) - } - return d -} - -func newReplication(ns, name string) *rapi.ArangoDeploymentReplication { - return &rapi.ArangoDeploymentReplication{ - TypeMeta: metav1.TypeMeta{ - APIVersion: rapi.SchemeGroupVersion.String(), - Kind: replication.ArangoDeploymentReplicationResourceKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: rapi.DeploymentReplicationSpec{ - Source: rapi.EndpointSpec{ - DeploymentName: util.NewString(srcDeploymentName), - Authentication: rapi.EndpointAuthenticationSpec{ - KeyfileSecretName: 
util.NewString(accessPackageSecretName), - }, - TLS: rapi.EndpointTLSSpec{ - CASecretName: util.NewString(accessPackageSecretName), - }, - }, - Destination: rapi.EndpointSpec{ - DeploymentName: util.NewString(dstDeploymentName), - }, - }, - } -} - -func waitForSyncDeploymentReady(ctx context.Context, ns, name string, kubecli kubernetes.Interface, c versioned.Interface) error { - return retry.Retry(func() error { - deployment, err := c.DatabaseV1().ArangoDeployments(ns).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return err - } - - sc, err := mustNewArangoDBSyncClient(ctx, kubecli, deployment) - if err != nil { - return err - } - - info, err := sc.Master().Status(ctx) - if err != nil { - return err - } - - if info.Status != sync.SyncStatusRunning { - return fmt.Errorf("SyncStatus not running: %s", info.Status) - } - - return nil - }, 5*time.Minute) -} - -func setupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c versioned.Interface) error { - - dstSpec := newSyncDeployment(namespace, dstDeploymentName, false) - srcSpec := newSyncDeployment(namespace, srcDeploymentName, true) - - if _, err := c.DatabaseV1().ArangoDeployments(namespace).Create(ctx, srcSpec, metav1.CreateOptions{}); err != nil { - return err - } - if _, err := c.DatabaseV1().ArangoDeployments(namespace).Create(ctx, dstSpec, metav1.CreateOptions{}); err != nil { - return err - } - - replSpec := newReplication(namespace, replicationResourceName) - if _, err := c.ReplicationV1().ArangoDeploymentReplications(namespace).Create(ctx, replSpec, metav1.CreateOptions{}); err != nil { - return err - } - - log.Print("Deployments and Replication created") - - //if err := waitForSyncDeploymentReady(ctx, namespace, srcSpec.GetName(), kube, c); err != nil { - // return errors.Wrap(err, "Source Cluster not ready") - //} - - if err := waitForSyncDeploymentReady(ctx, namespace, dstSpec.GetName(), kube, c); err != nil { - return errors.Wrap(err, "Destination Cluster not ready") - } - - log.Print("Deployments and Replication ready") - - return nil -} - -func waitForReplicationGone(ns, name string, c versioned.Interface) error { - return retry.Retry(func() error { - if _, err := c.ReplicationV1().ArangoDeploymentReplications(ns).Get(context.Background(), name, metav1.GetOptions{}); k8sutil.IsNotFound(err) { - return nil - } else if err != nil { - return err - } - return fmt.Errorf("Replication resource not gone") - }, 1*time.Minute) -} - -func waitForDeploymentGone(ns, name string, c versioned.Interface) error { - return retry.Retry(func() error { - if _, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), name, metav1.GetOptions{}); k8sutil.IsNotFound(err) { - return nil - } else if err != nil { - return err - } - return fmt.Errorf("Deployment resource %s not gone", name) - }, 1*time.Minute) -} - -func removeReplicationWaitForCompletion(ns, name string, c versioned.Interface) error { - if err := c.ReplicationV1().ArangoDeploymentReplications(ns).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil { - if k8sutil.IsNotFound(err) { - return nil - } - return err - } - if err := waitForReplicationGone(ns, name, c); err != nil { - return err - } - return nil -} - -func removeDeploymentWaitForCompletion(ns, name string, c versioned.Interface) error { - if err := c.DatabaseV1().ArangoDeployments(ns).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil { - if k8sutil.IsNotFound(err) { - return nil - } - return err - } - if err := waitForDeploymentGone(ns, name, c); err != 
nil { - return err - } - return nil -} - -func cleanupArangoDBCluster(c versioned.Interface) error { - if err := removeReplicationWaitForCompletion(namespace, replicationResourceName, c); err != nil { - return err - } - if err := removeDeploymentWaitForCompletion(namespace, dstDeploymentName, c); err != nil { - return err - } - if err := removeDeploymentWaitForCompletion(namespace, srcDeploymentName, c); err != nil { - return err - } - return nil -} - -func waitForPodRunning(ns, name string, kube kubernetes.Interface) error { - return retry.Retry(func() error { - pod, err := kube.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{}) - if err != nil { - return err - } - - if !k8sutil.IsPodReady(pod) { - return fmt.Errorf("pod not ready") - } - return nil - - }, 1*time.Minute) -} - -func copyPodLogs(ns, name string, kube kubernetes.Interface) error { - logs, err := kube.CoreV1().Pods(ns).GetLogs(name, &corev1.PodLogOptions{ - Follow: true, - }).Stream(context.Background()) - if err != nil { - return err - } - - defer logs.Close() - if _, err := io.Copy(os.Stdout, logs); err != nil { - return err - } - return nil -} - -func createArangoSyncTestPod(ns, name string) *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - corev1.Container{ - Name: "tests", - Image: arangoSyncTestImage, - ImagePullPolicy: corev1.PullAlways, - Args: []string{"-test.v", additionalTestArgs}, - Env: []corev1.EnvVar{ - corev1.EnvVar{ - Name: "MASTERAENDPOINTS", - Value: fmt.Sprintf("https://%s-sync.%s.svc:8629/", srcDeploymentName, namespace), - }, - corev1.EnvVar{ - Name: "MASTERBENDPOINTS", - Value: fmt.Sprintf("https://%s-sync.%s.svc:8629/", dstDeploymentName, namespace), - }, - corev1.EnvVar{ - Name: "CLUSTERAENDPOINTS", - Value: fmt.Sprintf("https://%s.%s.svc:8529/", srcDeploymentName, namespace), - }, - corev1.EnvVar{ - Name: "CLUSTERBENDPOINTS", - Value: fmt.Sprintf("https://%s.%s.svc:8529/", dstDeploymentName, namespace), - }, - corev1.EnvVar{ - Name: "CLUSTERACACERT", - Value: "/data/access/ca.crt", - }, - corev1.EnvVar{ - Name: "CLUSTERACLIENTCERT", - Value: "/data/access/tls.keyfile", - }, - corev1.EnvVar{ - Name: "CLUSTERMANAGED", - Value: "yes", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - corev1.VolumeMount{ - MountPath: "/data/access", - Name: "access", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - corev1.Volume{ - Name: "access", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: accessPackageSecretName, - }, - }, - }, - }, - }, - } -} - -func runArangoSyncTests(kube kubernetes.Interface) error { - - // Start a new pod with the test image - defer kube.CoreV1().Pods(namespace).Delete(context.Background(), arangosyncTestPodName, metav1.DeleteOptions{}) - podspec := createArangoSyncTestPod(namespace, arangosyncTestPodName) - if _, err := kube.CoreV1().Pods(namespace).Create(context.Background(), podspec, metav1.CreateOptions{}); err != nil { - return err - } - - log.Printf("Test pod created") - - if err := waitForPodRunning(namespace, arangosyncTestPodName, kube); err != nil { - return err - } - - log.Printf("Test pod running, receiving log") - - if err := copyPodLogs(namespace, arangosyncTestPodName, kube); err != nil { - return err - } - - pod, err := kube.CoreV1().Pods(namespace).Get(context.Background(), arangosyncTestPodName, metav1.GetOptions{}) - if err != nil { - return err - } - - if 
!k8sutil.IsPodSucceeded(pod) { - return fmt.Errorf("Pod did not succeed") - } - - return nil -} - -func main() { - flag.Parse() - ctx := context.Background() - kube := k8sutil.MustNewKubeClient() - c := client.MustNewClient() - - defer removeReplicationWaitForCompletion(namespace, replicationResourceName, c) - defer removeDeploymentWaitForCompletion(namespace, dstDeploymentName, c) - defer removeDeploymentWaitForCompletion(namespace, srcDeploymentName, c) - if err := setupArangoDBCluster(ctx, kube, c); err != nil { - log.Printf("Failed to set up deployment: %s", err.Error()) - return - } - - exitCode := 0 - - if err := runArangoSyncTests(kube); err != nil { - log.Printf("ArangoSync tests failed: %s", err.Error()) - exitCode = 1 - } - - if err := cleanupArangoDBCluster(c); err != nil { - log.Printf("Failed to clean up deployments: %s", err.Error()) - } - - os.Exit(exitCode) -} - -func mustNewArangoDBSyncClient(ctx context.Context, kubecli kubernetes.Interface, deployment *dapi.ArangoDeployment) (sync.API, error) { - ns := deployment.GetNamespace() - secrets := kubecli.CoreV1().Secrets(ns) - secretName := deployment.Spec.Sync.Authentication.GetJWTSecretName() - jwtSecret, err := k8sutil.GetTokenSecret(ctx, secrets, secretName) - if err != nil { - return nil, err - } - - // Fetch service DNS name - dnsName := k8sutil.CreateSyncMasterClientServiceDNSName(deployment) - ep := sync.Endpoint{"https://" + net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoSyncMasterPort))} - - api, err := sync.NewArangoSyncClient(ep, sync.AuthenticationConfig{JWTSecret: jwtSecret}, &tls.Config{InsecureSkipVerify: true}) - if err != nil { - return nil, err - } - return api, nil -} diff --git a/tests/sync_test.go b/tests/sync_test.go deleted file mode 100644 index b831265a6..000000000 --- a/tests/sync_test.go +++ /dev/null @@ -1,255 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
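The sync test controller above waits on the test pod with the repository's retry helper plus k8sutil pod predicates. The same wait-for-completion check can be written against plain pod phases with only client-go; a minimal sketch, with a simple poll loop standing in for the retry helper's semantics:

```go
package sketch

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForPodSucceeded polls a pod until it reaches phase Succeeded, and
// fails fast if the pod ends up in phase Failed.
func waitForPodSucceeded(cli kubernetes.Interface, ns, name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		pod, err := cli.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		switch pod.Status.Phase {
		case corev1.PodSucceeded:
			return nil
		case corev1.PodFailed:
			return fmt.Errorf("pod %s failed", name)
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("pod %s did not succeed within %s", name, timeout)
		}
		time.Sleep(5 * time.Second)
	}
}
```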
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// - -package tests - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/dchest/uniuri" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - driver "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/arangodb/kube-arangodb/pkg/util/retry" -) - -// waitUntilReplicationNotFound waits until a replication resource is deleted -func waitUntilReplicationNotFound(ns, name string, cli versioned.Interface) error { - return retry.Retry(func() error { - if _, err := cli.ReplicationV1().ArangoDeploymentReplications(ns).Get(context.Background(), name, metav1.GetOptions{}); k8sutil.IsNotFound(err) { - return nil - } else if err != nil { - return err - } - return fmt.Errorf("Resource not yet gone") - }, time.Minute) -} - -// TestSyncSimple creates two clusters and configures sync between them. -// Then it creates a test database in the source cluster and waits for it to appear in the destination. -func TestSyncSimple(t *testing.T) { - longOrSkip(t) - img := getEnterpriseImageOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - apname := "test-sync-sdc-a-access-package" - - depla := newDeployment("test-sync-sdc-a-" + uniuri.NewLen(4)) - depla.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depla.Spec.Image = util.NewString(img) - depla.Spec.Sync.Enabled = util.NewBool(true) - depla.Spec.Sync.ExternalAccess.Type = api.NewExternalAccessType(api.ExternalAccessTypeNone) - depla.Spec.Sync.ExternalAccess.AccessPackageSecretNames = []string{apname} - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depla, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment a failed: %v", err) - } - // Prepare cleanup - defer deferedCleanupDeployment(c, depla.GetName(), ns) - - deplb := newDeployment("test-sync-sdc-b-" + uniuri.NewLen(4)) - deplb.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - deplb.Spec.Image = util.NewString(img) - deplb.Spec.Sync.Enabled = util.NewBool(true) - deplb.Spec.Sync.ExternalAccess.Type = api.NewExternalAccessType(api.ExternalAccessTypeNone) - - // Create deployment - _, err = c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), deplb, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment b failed: %v", err) - } - // Prepare cleanup - defer deferedCleanupDeployment(c, deplb.GetName(), ns) - - // Wait for the access package secret to become available - _, err = waitUntilSecret(kubecli, apname, ns, deploymentReadyTimeout) - if err != nil { - t.Fatalf("Failed to get access package: %v", err) - } - - // Deploy Replication Resource - repl := newReplication("test-sync-sdc-repl") - repl.Spec.Source.DeploymentName = util.NewString(depla.GetName()) - repl.Spec.Source.Authentication.KeyfileSecretName = util.NewString(apname) - repl.Spec.Source.TLS.CASecretName = util.NewString(apname) - repl.Spec.Destination.DeploymentName = util.NewString(deplb.GetName()) - _, err = c.ReplicationV1().ArangoDeploymentReplications(ns).Create(context.Background(), repl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create replication resource failed: %v", err) - } - defer
deferedCleanupReplication(c, repl.GetName(), ns) - - deplaobj, err := waitUntilDeployment(c, depla.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment A not running in time: %v", err) - } - - deplbobj, err := waitUntilDeployment(c, deplb.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment B not running in time: %v", err) - } - - // Create a database in DC-A, then wait for it to appear in DC-B - time.Sleep(10 * time.Second) - testdbname := "replicated-db" - - ctx := context.Background() - clienta := mustNewArangodDatabaseClient(ctx, kubecli, deplaobj, t, nil) - if _, err := clienta.CreateDatabase(ctx, testdbname, nil); err != nil { - t.Fatalf("Failed to create database in a: %v", err) - } - - clientb := mustNewArangodDatabaseClient(ctx, kubecli, deplbobj, t, nil) - if err := retry.Retry(func() error { - if ok, err := clientb.DatabaseExists(ctx, testdbname); err != nil { - return err - } else if !ok { - return fmt.Errorf("Database does not exist") - } - return nil - }, time.Minute); err != nil { - t.Fatalf("Database was not replicated to b in time: %v", err) - } - - // Disable replication - removeReplication(c, repl.GetName(), ns) - if err := waitUntilReplicationNotFound(ns, repl.GetName(), c); err != nil { - t.Errorf("Could not remove replication resource: %v", err) - } - - // Cleanup - removeDeployment(c, deplb.GetName(), ns) - removeDeployment(c, depla.GetName(), ns) -} - -// TestSyncToggleEnabled tests a normal cluster and enables sync later. -// Once sync is active, it is disabled again. -func TestSyncToggleEnabled(t *testing.T) { - longOrSkip(t) - img := getEnterpriseImageOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - // Prepare deployment config - depl := newDeployment("test-sync-toggle-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster) - depl.Spec.Image = util.NewString(img) - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - // Prepare cleanup - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, apiObject.Spec) - }); err != nil { - t.Fatalf("Cluster not running in expected health in time: %v", err) - } - - // Enable sync - updated, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.Sync.Enabled = util.NewBool(true) - }) - if err != nil { - t.Fatalf("Failed to update deployment: %v", err) - } - - // Wait until the sync JWT secret has been created - if _, err := waitUntilSecret(kubecli, updated.Spec.Sync.Authentication.GetJWTSecretName(), ns, deploymentReadyTimeout); err != nil { - t.Fatalf("Sync JWT secret not created in time: %v", err) - } - - // Create a syncmaster client - syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t) - - // Wait for syncmasters to be available - if err := waitUntilSyncVersionUp(syncClient); err != nil { - t.Fatalf("SyncMasters not returning version in time: %v", err) - } - - // Wait for cluster to reach new size - if err :=
waitUntilClusterHealth(client, func(h driver.ClusterHealth) error { - return clusterHealthEqualsSpec(h, updated.Spec) - }); err != nil { - t.Fatalf("Cluster not running, after scale-up, in expected health in time: %v", err) - } - // Check number of syncmasters - if err := waitUntilSyncMasterCountReached(syncClient, 3); err != nil { - t.Fatalf("Unexpected #syncmasters, after enabling sync: %v", err) - } - // Check number of syncworkers - if err := waitUntilSyncWorkerCountReached(syncClient, 3); err != nil { - t.Fatalf("Unexpected #syncworkers, after enabling sync: %v", err) - } - - // Disable sync - updated, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) { - spec.Sync.Enabled = util.NewBool(false) - }) - if err != nil { - t.Fatalf("Failed to update deployment: %v", err) - } - - // Wait for deployment to have no more syncmasters & workers - if _, err := waitUntilDeployment(c, depl.GetName(), ns, func(apiObject *api.ArangoDeployment) error { - if cnt := len(apiObject.Status.Members.SyncMasters); cnt > 0 { - return maskAny(fmt.Errorf("Expected 0 syncmasters, got %d", cnt)) - } - if cnt := len(apiObject.Status.Members.SyncWorkers); cnt > 0 { - return maskAny(fmt.Errorf("Expected 0 syncworkers, got %d", cnt)) - } - return nil - }); err != nil { - t.Fatalf("Failed to reach deployment state without syncmasters & syncworkers: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} diff --git a/tests/test_util.go b/tests/test_util.go deleted file mode 100644 index bdecc50d2..000000000 --- a/tests/test_util.go +++ /dev/null @@ -1,939 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020-2021 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Ewout Prangsma -// Author Tomasz Mielech -// - -package tests - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "os" - "reflect" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - "github.com/arangodb/kube-arangodb/pkg/apis/replication" - - "github.com/stretchr/testify/require" - - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - - "github.com/arangodb/arangosync-client/client" - "github.com/arangodb/arangosync-client/tasks" - driver "github.com/arangodb/go-driver" - vst "github.com/arangodb/go-driver/vst" - vstProtocol "github.com/arangodb/go-driver/vst/protocol" - "github.com/pkg/errors" - "github.com/rs/zerolog" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - rapi "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1" - cl "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/arangodb/kube-arangodb/pkg/util/arangod" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/arangodb/kube-arangodb/pkg/util/retry" -) - -const ( - deploymentReadyTimeout = time.Minute * 4 - deploymentUpgradeTimeout = time.Minute * 20 -) - -var ( - maskAny = errors.WithStack - syncClientCache client.ClientCache - showEnterpriseImageOnce sync.Once -) - -// createArangodVSTClientForDNSName creates a go-driver client for a given DNS name. -func createArangodVSTClientForDNSName(apiObject *api.ArangoDeployment, dnsName string, shortTimeout bool) (driver.Client, error) { - config := driver.ClientConfig{} - connConfig := createArangodVSTConfigForDNSNames(apiObject, []string{dnsName}, shortTimeout) - // TODO deal with TLS with proper CA checking - conn, err := vst.NewConnection(connConfig) - if err != nil { - return nil, maskAny(err) - } - - // Create client - config = driver.ClientConfig{ - Connection: conn, - } - - auth := driver.BasicAuthentication("root", "") - if err != nil { - return nil, maskAny(err) - } - config.Authentication = auth - c, err := driver.NewClient(config) - if err != nil { - return nil, maskAny(err) - } - return c, nil -} - -// createArangodVSTConfigForDNSNames creates a go-driver VST connection config for a given DNS names. 
-func createArangodVSTConfigForDNSNames(apiObject *api.ArangoDeployment, dnsNames []string, shortTimeout bool) vst.ConnectionConfig { - scheme := "http" - tlsConfig := &tls.Config{} - timeout := 90 * time.Second - if shortTimeout { - timeout = 100 * time.Millisecond - } - if apiObject != nil && apiObject.Spec.IsSecure() { - scheme = "https" - tlsConfig = &tls.Config{InsecureSkipVerify: true} - } - transport := vstProtocol.TransportConfig{ - IdleConnTimeout: timeout, - Version: vstProtocol.Version1_1, - } - connConfig := vst.ConnectionConfig{ - TLSConfig: tlsConfig, - Transport: transport, - } - for _, dnsName := range dnsNames { - connConfig.Endpoints = append(connConfig.Endpoints, scheme+"://"+net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoPort))) - } - return connConfig -} - -// createArangodDatabaseVSTClient creates a go-driver client for accessing the entire cluster (or single server) via VST -func createArangodDatabaseVSTClient(apiObject *api.ArangoDeployment, shortTimeout bool) (driver.Client, error) { - // Create connection - dnsName := k8sutil.CreateDatabaseClientServiceDNSName(apiObject) - c, err := createArangodVSTClientForDNSName(apiObject, dnsName, shortTimeout) - if err != nil { - return nil, maskAny(err) - } - return c, nil -} - -// longOrSkip checks the short test flag. -// If short is set, the current test is skipped. -// If not, this function returns as normal. -func longOrSkip(t *testing.T) { - if testing.Short() { - t.Skip("Test skipped in short test") - } -} - -// getEnterpriseImageOrSkip returns the docker image used for enterprise -// tests. If empty, enterprise tests are skipped. -func getEnterpriseImageOrSkip(t *testing.T) string { - image := strings.TrimSpace(os.Getenv("ENTERPRISEIMAGE")) - if image == "" { - t.Skip("Skipping test because ENTERPRISEIMAGE is not set") - } else { - showEnterpriseImageOnce.Do(func() { - t.Logf("Using enterprise image: %s", image) - }) - } - return image -} - -const testEnterpriseLicenseKeySecretName = "arangodb-jenkins-license-key" -const testBackupRemoteSecretName = "arangodb-backup-remote-secret" - -// shouldCleanDeployments returns true when deployments created -// by tests should be removed, even when the test fails. -func shouldCleanDeployments() bool { - return os.Getenv("CLEANDEPLOYMENTS") != "" -} - -// mustNewKubeClient creates a kubernetes client -// failing the test on errors. -func mustNewKubeClient(t *testing.T) kubernetes.Interface { - c, err := k8sutil.NewKubeClient() - if err != nil { - t.Fatalf("Failed to create kube cli: %v", err) - } - return c -} - -// DatabaseClientOptions contains options for creating an ArangoDB database client. -type DatabaseClientOptions struct { - ShortTimeout bool // If set, the connection timeout is set very short - UseVST bool // If set, a VST connection is created instead of an HTTP connection -} - -// mustNewArangodDatabaseClient creates a new database client, -// failing the test on errors. 
-func mustNewArangodDatabaseClient(ctx context.Context, kubecli kubernetes.Interface, apiObject *api.ArangoDeployment, t *testing.T, options *DatabaseClientOptions) driver.Client { - var c driver.Client - var err error - shortTimeout := options != nil && options.ShortTimeout - useVST := options != nil && options.UseVST - if useVST { - c, err = createArangodDatabaseVSTClient(apiObject, shortTimeout) - } else { - c, err = arangod.CreateArangodDatabaseClient(ctx, kubecli.CoreV1(), apiObject, shortTimeout) - } - if err != nil { - t.Fatalf("Failed to create arango database client: %v", err) - } - return c -} - -// mustNewArangoSyncClient creates a new arangosync client, with all syncmasters -// as endpoint. It is failing the test on errors. -func mustNewArangoSyncClient(ctx context.Context, kubecli kubernetes.Interface, apiObject *api.ArangoDeployment, t *testing.T) client.API { - ns := apiObject.GetNamespace() - secrets := kubecli.CoreV1().Secrets(ns) - secretName := apiObject.Spec.Sync.Authentication.GetJWTSecretName() - jwtToken, err := k8sutil.GetTokenSecret(ctx, secrets, secretName) - if err != nil { - t.Fatalf("Failed to get sync jwt secret '%s': %s", secretName, err) - } - - // Fetch service DNS name - dnsName := k8sutil.CreateSyncMasterClientServiceDNSName(apiObject) - ep := client.Endpoint{"https://" + net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoSyncMasterPort))} - - // Build client - log := zerolog.Logger{} - tlsAuth := tasks.TLSAuthentication{} - auth := client.NewAuthentication(tlsAuth, jwtToken) - insecureSkipVerify := true - c, err := syncClientCache.GetClient(log, ep, auth, insecureSkipVerify) - if err != nil { - t.Fatalf("Failed to get sync client: %s", err) - } - return c -} - -// getNamespace returns the kubernetes namespace in which to run tests. -func getNamespace(t *testing.T) string { - ns := os.Getenv("TEST_NAMESPACE") - if ns == "" { - t.Fatal("Missing environment variable TEST_NAMESPACE") - } - return ns -} - -func newReplication(name string) *rapi.ArangoDeploymentReplication { - repl := &rapi.ArangoDeploymentReplication{ - TypeMeta: metav1.TypeMeta{ - APIVersion: rapi.SchemeGroupVersion.String(), - Kind: replication.ArangoDeploymentReplicationResourceKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: strings.ToLower(name), - }, - } - - return repl -} - -// newDeployment creates a basic ArangoDeployment with configured -// type, name and image. 
-func newDeployment(name string) *api.ArangoDeployment { - depl := &api.ArangoDeployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: api.SchemeGroupVersion.String(), - Kind: deployment.ArangoDeploymentResourceKind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: strings.ToLower(name), - }, - Spec: api.DeploymentSpec{ - ImagePullPolicy: util.NewPullPolicy(v1.PullAlways), - License: api.LicenseSpec{ - SecretName: util.NewString(testEnterpriseLicenseKeySecretName), - }, - }, - } - - // Set the default image to the value given in the env. - // Some tests override this value if they need a specific version, - // e.g. upgrade tests. - // If no value is given, the operator default (arangodb/arangodb:latest) is used. - image := strings.TrimSpace(os.Getenv("ARANGODIMAGE")) - if image != "" { - depl.Spec.Image = util.NewString(image) - } - - disableIPv6 := strings.TrimSpace(os.Getenv("TESTDISABLEIPV6")) - if disableIPv6 != "" && disableIPv6 != "0" { - depl.Spec.DisableIPv6 = util.NewBool(true) - } - - return depl -} - -// waitUntilDeploymentMembers waits until the members of the deployment with the given name in the given namespace are ready -func waitUntilDeploymentMembers(cli versioned.Interface, deploymentName, ns string, cb api.ServerGroupFunc, - timeout ...time.Duration) (*api.ArangoDeployment, error) { - - return waitUntilDeployment(cli, deploymentName, ns, func(d *api.ArangoDeployment) error { - return d.ForeachServerGroup(cb, &d.Status) - }, timeout...) -} - -func newDeploymentWithValidation(name string, adjustDeployment func(*api.ArangoDeployment)) (*api.ArangoDeployment, error) { - deployment := newDeployment(name) - adjustDeployment(deployment) - - deployment.Spec.SetDefaults(deployment.GetName()) - if err := deployment.Spec.Validate(); err != nil { - return nil, err - } - - return deployment, nil -} - -// waitUntilDeployment waits until a deployment with given name in given namespace -// has reached a state where the given predicate returns nil. -func waitUntilDeployment(cli versioned.Interface, deploymentName, ns string, predicate func(*api.ArangoDeployment) error, timeout ...time.Duration) (*api.ArangoDeployment, error) { - var result *api.ArangoDeployment - op := func() error { - obj, err := cli.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), deploymentName, metav1.GetOptions{}) - if err != nil { - result = nil - return maskAny(err) - } - result = obj - if predicate != nil { - if err := predicate(obj); err != nil { - return maskAny(err) - } - } - return nil - } - actualTimeout := deploymentReadyTimeout - if len(timeout) > 0 { - actualTimeout = timeout[0] - } - if err := retry.Retry(op, actualTimeout); err != nil { - return nil, maskAny(err) - } - return result, nil -} - -// waitUntilSecret waits until a secret with the given name exists in the given namespace. -func waitUntilSecret(cli kubernetes.Interface, secretName, ns string, timeout time.Duration) (*v1.Secret, error) { - var result *v1.Secret - op := func() error { - obj, err := cli.CoreV1().Secrets(ns).Get(context.Background(), secretName, metav1.GetOptions{}) - if err != nil { - result = nil - return maskAny(err) - } - result = obj - return nil - } - if err := retry.Retry(op, timeout); err != nil { - return nil, maskAny(err) - } - return result, nil -} - -// waitUntilService waits until a service with the given name in the given -// namespace exists and has reached a state where the given predicate -// returns nil.
-func waitUntilService(cli kubernetes.Interface, serviceName, ns string, predicate func(*v1.Service) error, timeout time.Duration) (*v1.Service, error) { - var result *v1.Service - op := func() error { - obj, err := cli.CoreV1().Services(ns).Get(context.Background(), serviceName, metav1.GetOptions{}) - if err != nil { - result = nil - return maskAny(err) - } - result = obj - if predicate != nil { - if err := predicate(obj); err != nil { - return maskAny(err) - } - } - return nil - } - if err := retry.Retry(op, timeout); err != nil { - return nil, maskAny(err) - } - return result, nil -} - -// waitUntilEndpoints waits until an endpoints resource with given name -// in given namespace exists and has reached a state where the given -// predicate returns nil. -func waitUntilEndpoints(cli kubernetes.Interface, serviceName, ns string, predicate func(*v1.Endpoints) error, timeout time.Duration) (*v1.Endpoints, error) { - var result *v1.Endpoints - op := func() error { - obj, err := cli.CoreV1().Endpoints(ns).Get(context.Background(), serviceName, metav1.GetOptions{}) - if err != nil { - result = nil - return maskAny(err) - } - result = obj - if predicate != nil { - if err := predicate(obj); err != nil { - return maskAny(err) - } - } - return nil - } - if err := retry.Retry(op, timeout); err != nil { - return nil, maskAny(err) - } - return result, nil -} - -// waitUntilSecretNotFound waits until a secret with given name in given namespace -// is no longer found. -func waitUntilSecretNotFound(cli kubernetes.Interface, secretName, ns string, timeout time.Duration) error { - op := func() error { - if _, err := cli.CoreV1().Secrets(ns).Get(context.Background(), secretName, metav1.GetOptions{}); k8sutil.IsNotFound(err) { - return nil - } else if err != nil { - return maskAny(err) - } - return maskAny(fmt.Errorf("Secret %s still there", secretName)) - } - if err := retry.Retry(op, timeout); err != nil { - return maskAny(err) - } - return nil -} - -// waitUntilClusterHealth waits until an arango cluster -// reached a state where the given predicate returns nil. -func waitUntilClusterHealth(cli driver.Client, predicate func(driver.ClusterHealth) error, timeout ...time.Duration) error { - ctx := context.Background() - op := func() error { - cluster, err := cli.Cluster(ctx) - if err != nil { - return maskAny(err) - } - h, err := cluster.Health(ctx) - if err != nil { - return maskAny(err) - } - if predicate != nil { - if err := predicate(h); err != nil { - return maskAny(err) - } - } - return nil - } - actualTimeout := deploymentReadyTimeout - if len(timeout) > 0 { - actualTimeout = timeout[0] - } - if err := retry.Retry(op, actualTimeout); err != nil { - return maskAny(err) - } - return nil -} - -// waitUntilClusterVersionUp waits until an arango cluster is healthy and -// all servers are running the given version. -func waitUntilClusterVersionUp(cli driver.Client, version driver.Version) error { - return waitUntilClusterHealth(cli, func(h driver.ClusterHealth) error { - for s, r := range h.Health { - if cmp := r.Version.CompareTo(version); cmp != 0 { - return maskAny(fmt.Errorf("Member %s has version %s, expecting %s", s, r.Version, version)) - } - } - - return nil - }, deploymentUpgradeTimeout) -} - -// waitUntilVersionUp waits until the arango database responds to -// an `/_api/version` request without an error. An additional Predicate -// can do a check on the VersionInfo object returned by the server. 
-func waitUntilVersionUp(cli driver.Client, predicate func(driver.VersionInfo) error, allowNoLeaderResponse ...bool) error { - var noLeaderErr error - allowNoLead := len(allowNoLeaderResponse) > 0 && allowNoLeaderResponse[0] - ctx := context.Background() - - op := func() error { - if version, err := cli.Version(ctx); allowNoLead && driver.IsNoLeader(err) { - noLeaderErr = err - return nil //return nil to make the retry below pass - } else if err != nil { - return maskAny(err) - } else if predicate != nil { - return predicate(version) - } - return nil - } - - if err := retry.Retry(op, deploymentReadyTimeout); err != nil { - return maskAny(err) - } - - // noLeadErr updated in op - if noLeaderErr != nil { - return maskAny(noLeaderErr) - } - - return nil -} - -// waitUntilSyncVersionUp waits until the syncmasters responds to -// an `/_api/version` request without an error. An additional Predicate -// can do a check on the VersionInfo object returned by the server. -func waitUntilSyncVersionUp(cli client.API) error { - ctx := context.Background() - - op := func() error { - if _, err := cli.Version(ctx); err != nil { - return maskAny(err) - } - return nil - } - - if err := retry.Retry(op, deploymentReadyTimeout); err != nil { - return maskAny(err) - } - - return nil -} - -// waitUntilSyncMasterCountReached waits until the number of syncmasters -// is equal to the given number. -func waitUntilSyncMasterCountReached(cli client.API, expectedSyncMasters int) error { - ctx := context.Background() - - op := func() error { - if list, err := cli.Master().Masters(ctx); err != nil { - return maskAny(err) - } else if len(list) != expectedSyncMasters { - return maskAny(fmt.Errorf("Expected %d syncmasters, got %d", expectedSyncMasters, len(list))) - } - return nil - } - - if err := retry.Retry(op, deploymentReadyTimeout); err != nil { - return maskAny(err) - } - - return nil -} - -// waitUntilSyncWorkerCountReached waits until the number of syncworkers -// is equal to the given number. 
-func waitUntilSyncWorkerCountReached(cli client.API, expectedSyncWorkers int) error { - ctx := context.Background() - - op := func() error { - if list, err := cli.Master().RegisteredWorkers(ctx); err != nil { - return maskAny(err) - } else if len(list) != expectedSyncWorkers { - return maskAny(fmt.Errorf("Expected %d syncworkers, got %d", expectedSyncWorkers, len(list))) - } - return nil - } - - if err := retry.Retry(op, deploymentReadyTimeout); err != nil { - return maskAny(err) - } - - return nil -} - -// createEqualVersionsPredicate creates a predicate to be used in waitUntilVersionUp -func createEqualVersionsPredicate(version driver.Version) func(driver.VersionInfo) error { - return func(infoFromServer driver.VersionInfo) error { - if version.CompareTo(infoFromServer.Version) != 0 { - return maskAny(fmt.Errorf("given version %v and version from server %v do not match", version, infoFromServer.Version)) - } - return nil - } -} - -// waitUntilClusterSidecarsEqualSpec returns nil once the sidecars of all members match the spec -func waitUntilClusterSidecarsEqualSpec(t *testing.T, depl api.ArangoDeployment) error { - - c := cl.MustNewClient() - ns := getNamespace(t) - - var noGood int - for start := time.Now(); time.Since(start) < 600*time.Second; { - - // Fetch latest status so we know all member details - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), depl.GetName(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get deployment: %v", err) - } - - // Count pods that do not match - noGood = 0 - - // Check one member after another - apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error { - for _, m := range *status { - if len(m.SideCarSpecs) != len(spec.GetSidecars()) { - noGood++ - continue - } - for _, scar := range spec.GetSidecars() { - mcar, found := m.SideCarSpecs[scar.Name] - if found { - if !reflect.DeepEqual(mcar, scar) { - noGood++ - } - } else { - noGood++ - } - } - } - return nil - }, &apiObject.Status) - - if noGood == 0 { - return nil - } - - time.Sleep(2 * time.Second) - } - - return maskAny(fmt.Errorf("%d pods with unmatched sidecars", noGood)) -} - -// clusterHealthEqualsSpec returns nil when the given health matches -// the given deployment spec.
-func clusterHealthEqualsSpec(h driver.ClusterHealth, spec api.DeploymentSpec) error { - agents := 0 - goodDBServers := 0 - goodCoordinators := 0 - for _, s := range h.Health { - if s.Role == driver.ServerRoleAgent { - agents++ - } else if s.Status == driver.ServerStatusGood { - switch s.Role { - case driver.ServerRoleDBServer: - goodDBServers++ - case driver.ServerRoleCoordinator: - goodCoordinators++ - } - } - } - if spec.Agents.GetCount() == agents && - spec.DBServers.GetCount() == goodDBServers && - spec.Coordinators.GetCount() == goodCoordinators { - return nil - } - return fmt.Errorf("Expected %d,%d,%d got %d,%d,%d", - spec.Agents.GetCount(), spec.DBServers.GetCount(), spec.Coordinators.GetCount(), - agents, goodDBServers, goodCoordinators, - ) -} - -// updateDeployment updates a deployment -func updateDeployment(cli versioned.Interface, deploymentName, ns string, update func(*api.DeploymentSpec)) (*api.ArangoDeployment, error) { - for { - // Get current version - current, err := cli.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), deploymentName, metav1.GetOptions{}) - if err != nil { - return nil, maskAny(err) - } - update(&current.Spec) - current, err = cli.DatabaseV1().ArangoDeployments(ns).Update(context.Background(), current, metav1.UpdateOptions{}) - if k8sutil.IsConflict(err) { - // Retry - continue - } else if err != nil { - return nil, maskAny(err) - } - return current, nil - } -} - -// removeDeployment removes a deployment, ignoring not-found errors -func removeDeployment(cli versioned.Interface, deploymentName, ns string) error { - if err := cli.DatabaseV1().ArangoDeployments(ns).Delete(context.Background(), deploymentName, metav1.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) { - return maskAny(err) - } - return nil -} - -// removeReplication removes a replication, ignoring not-found errors -func removeReplication(cli versioned.Interface, replicationName, ns string) error { - if err := cli.ReplicationV1().ArangoDeploymentReplications(ns).Delete(context.Background(), replicationName, metav1.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) { - return maskAny(err) - } - return nil -} - -// deferedCleanupDeployment removes a deployment when shouldCleanDeployments returns true. -// This function is intended to be used in a defer statement. -func deferedCleanupDeployment(cli versioned.Interface, deploymentName, ns string) error { - if !shouldCleanDeployments() { - return nil - } - if err := removeDeployment(cli, deploymentName, ns); err != nil { - return maskAny(err) - } - return nil -} - -// deferedCleanupReplication removes a replication when shouldCleanDeployments returns true. -// This function is intended to be used in a defer statement. -func deferedCleanupReplication(cli versioned.Interface, replicationName, ns string) error { - if !shouldCleanDeployments() { - return nil - } - if err := removeReplication(cli, replicationName, ns); err != nil { - return maskAny(err) - } - return nil -} - -// removeSecret removes a secret, ignoring not-found errors -func removeSecret(cli kubernetes.Interface, secretName, ns string) error { - if err := cli.CoreV1().Secrets(ns).Delete(context.Background(), secretName, metav1.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) { - return maskAny(err) - } - return nil -} - -// check if a deployment is up and has reached a state where it is able to answer /_api/version requests.
-// Optionally the returned version can be checked against a user provided version -func waitUntilArangoDeploymentHealthy(deployment *api.ArangoDeployment, DBClient driver.Client, k8sClient kubernetes.Interface, versionString driver.Version) error { - // deployment checks - var checkVersionPredicate func(driver.VersionInfo) error - if len(versionString) > 0 { - checkVersionPredicate = createEqualVersionsPredicate(versionString) - } - switch mode := deployment.Spec.GetMode(); mode { - case api.DeploymentModeCluster: - // Wait for cluster to be completely ready - if err := waitUntilClusterHealth(DBClient, func(h driver.ClusterHealth) error { - if len(versionString) > 0 { - for s, r := range h.Health { - if r.Version == "" { // Older versions of arangodb do not export the version string in cluster health - continue - } - if cmp := r.Version.CompareTo(versionString); cmp != 0 { - return maskAny(fmt.Errorf("Member %s has version %s, expecting %s", s, r.Version, versionString)) - } - } - } - - return clusterHealthEqualsSpec(h, deployment.Spec) - }); err != nil { - return maskAny(fmt.Errorf("Cluster not running in expected health in time: %s", err)) - } - case api.DeploymentModeSingle: - if err := waitUntilVersionUp(DBClient, checkVersionPredicate); err != nil { - return maskAny(fmt.Errorf("Single Server not running in time: %s", err)) - } - case api.DeploymentModeActiveFailover: - if err := waitUntilVersionUp(DBClient, checkVersionPredicate); err != nil { - return maskAny(fmt.Errorf("Single Server not running in time: %s", err)) - } - - members := deployment.Status.Members - singles := members.Single - agents := members.Agents - - if len(singles) != *deployment.Spec.Single.Count || len(agents) != *deployment.Spec.Agents.Count { - return maskAny(fmt.Errorf("Wrong number of servers: single %d - agents %d", len(singles), len(agents))) - } - - ctx := context.Background() - - //check agents - for _, agent := range agents { - dbclient, err := arangod.CreateArangodClient(ctx, k8sClient.CoreV1(), deployment, api.ServerGroupAgents, agent.ID) - if err != nil { - return maskAny(fmt.Errorf("Unable to create connection to: %s", agent.ID)) - } - - if err := waitUntilVersionUp(dbclient, checkVersionPredicate); err != nil { - return maskAny(fmt.Errorf("Version check failed for: %s", agent.ID)) - } - } - //check single servers - { - var goodResults, noLeaderResults int - for _, single := range singles { - dbclient, err := arangod.CreateArangodClient(ctx, k8sClient.CoreV1(), deployment, api.ServerGroupSingle, single.ID) - if err != nil { - return maskAny(fmt.Errorf("Unable to create connection to: %s", single.ID)) - } - - if err := waitUntilVersionUp(dbclient, checkVersionPredicate, true); err == nil { - goodResults++ - } else if driver.IsNoLeader(err) { - noLeaderResults++ - } else { - return maskAny(fmt.Errorf("Version check failed for: %s", single.ID)) - } - } - - expectedGood := *deployment.Spec.Single.Count - expectedNoLeader := 0 - if goodResults != expectedGood || noLeaderResults != expectedNoLeader { - return maskAny(fmt.Errorf("Wrong number of results: good %d (expected: %d)- noleader %d (expected %d)", goodResults, expectedGood, noLeaderResults, expectedNoLeader)) - } - } - default: - return maskAny(fmt.Errorf("DeploymentMode %s is not supported", mode)) - } - return nil -} - -// testServerRole performs a synchronize endpoints and then requests the server role. -// On success, the role is compared with the given expected role. 
-// When the requests fail or the role is not equal to the expected role, an error is returned. -func testServerRole(ctx context.Context, client driver.Client, expectedRole driver.ServerRole) error { - op := func(ctx context.Context) error { - if err := client.SynchronizeEndpoints(ctx); err != nil { - return maskAny(err) - } - role, err := client.ServerRole(ctx) - if err != nil { - return maskAny(err) - } - if role != expectedRole { - return retry.Permanent(fmt.Errorf("Unexpected server role: Expected '%s', got '%s'", expectedRole, role)) - } - return nil - } - if err := retry.RetryWithContext(ctx, op, time.Second*20); err != nil { - return maskAny(err) - } - return nil -} - -func getPodCreationTimes(t *testing.T, kubecli kubernetes.Interface, depl *api.ArangoDeployment) map[string]metav1.Time { - ns := getNamespace(t) - podCreationTimes := make(map[string]metav1.Time) - depl.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error { - fmt.Printf("Looking at group %s with %d pods...\n", group.AsRole(), len(*status)) - for _, m := range *status { - // Get pod: - fmt.Printf("Looking at pod %s...\n", m.PodName) - pod, err := kubecli.CoreV1().Pods(ns).Get(context.Background(), m.PodName, metav1.GetOptions{}) - // Simply ignore error and skip pod: - if err == nil { - fmt.Printf("Found creation time of %v for pod %s\n", pod.GetCreationTimestamp(), m.PodName) - podCreationTimes[m.PodName] = pod.GetCreationTimestamp() - } else { - fmt.Printf("Could not get pod %s error: %v\n", m.PodName, err) - } - } - return nil - }, &depl.Status) - return podCreationTimes -} - -func checkPodCreationTimes(t *testing.T, kubecli kubernetes.Interface, depl *api.ArangoDeployment, times map[string]metav1.Time) { - foundTimes := getPodCreationTimes(t, kubecli, depl) - for name, timestamp := range times { - ti, found := foundTimes[name] - if !found { - t.Errorf("Did not find pod %s any more in creation time check!", name) - } else if ti != timestamp { - t.Errorf("Pod %s has been rotated unexpectedly in creation time check!", name) - } - } - if len(foundTimes) != len(times) { - t.Errorf("Number of pods found (%d) in creation time check does not match expected %d!", len(foundTimes), len(times)) - } -} - -type DocumentGenerator struct { - kubecli kubernetes.Interface - deployment *api.ArangoDeployment - collectionName string - numberOfShards uint32 - numberOfDocuments uint32 - documentsMeta driver.DocumentMetaSlice -} - -func NewDocumentGenerator(kubecli kubernetes.Interface, deployment *api.ArangoDeployment, - collectionName string, numberOfShards, numberOfDocuments uint32) *DocumentGenerator { - return &DocumentGenerator{ - kubecli: kubecli, - deployment: deployment, - collectionName: collectionName, - numberOfShards: numberOfShards, - numberOfDocuments: numberOfDocuments, - } -} - -func (d *DocumentGenerator) generate(t *testing.T, generator func(int) interface{}) { - - opts := &driver.CreateCollectionOptions{ - NumberOfShards: int(d.numberOfShards), - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - DBClient := mustNewArangodDatabaseClient(ctx, d.kubecli, d.deployment, t, nil) - db, err := DBClient.Database(ctx, "_system") - require.NoError(t, err, "failed to get database") - - collection, err := db.CreateCollection(context.Background(), d.collectionName, opts) - require.NoError(t, err, "failed to create collection") - - d.documentsMeta = make(driver.DocumentMetaSlice, d.numberOfDocuments) - items := 
make([]interface{}, int(d.numberOfDocuments)) - for i := 0; i < int(d.numberOfDocuments); i++ { - items[i] = generator(i) - } - - var errorSlice driver.ErrorSlice - errorSliceExpected := make(driver.ErrorSlice, int(d.numberOfDocuments)) - d.documentsMeta, errorSlice, err = collection.CreateDocuments(context.Background(), items) - require.NoError(t, err, "failed to create documents") - require.Equal(t, errorSliceExpected, errorSlice) -} - -func (d *DocumentGenerator) check(t *testing.T) { - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - DBClient := mustNewArangodDatabaseClient(ctx, d.kubecli, d.deployment, t, nil) - db, err := DBClient.Database(ctx, "_system") - require.NoError(t, err, "failed to get database") - - collection, err := db.Collection(context.Background(), d.collectionName) - require.NoError(t, err, "failed to get collection") - - count, err := collection.Count(context.Background()) - require.NoError(t, err, "failed to get number of documents in the collection") - require.Equal(t, int64(len(d.documentsMeta)), count, "number of documents does not match") - - for _, m := range d.documentsMeta { - exist, err := collection.DocumentExists(context.Background(), m.Key) - require.NoError(t, err, "failed to check document existence") - require.Equal(t, true, exist, "document does not exist") - } -} diff --git a/tests/timeout.go b/tests/timeout.go deleted file mode 100644 index cb809eed7..000000000 --- a/tests/timeout.go +++ /dev/null @@ -1,41 +0,0 @@ -package tests - -import ( - "fmt" - "time" -) - -type interrupt struct { -} - -func (i interrupt) Error() string { - return "interrupted" -} - -func isInterrupt(err error) bool { - _, ok := err.(interrupt) - return ok -} - -func timeout(interval, timeout time.Duration, action func() error) error { - intervalT := time.NewTicker(interval) - defer intervalT.Stop() - - timeoutT := time.NewTimer(timeout) - defer timeoutT.Stop() - - for { - select { - case <-intervalT.C: - err := action() - if err != nil { - if isInterrupt(err) { - return nil - } - return err - } - case <-timeoutT.C: - return fmt.Errorf("function timed out") - } - } -} diff --git a/tests/upgrade_test.go b/tests/upgrade_test.go deleted file mode 100644 index 8a86f2b4f..000000000 --- a/tests/upgrade_test.go +++ /dev/null @@ -1,280 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2020 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// -// Author Jan Christoph Uhde -// -package tests - -import ( - "context" - "fmt" - "strings" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - driver "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/dchest/uniuri" -) - -// func TestUpgradeClusterRocksDB33pto34p(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB, "arangodb/arangodb-preview:3.3", "arangodb/arangodb-preview:3.4") -// } - -// test upgrade single server mmfiles 3.2 -> 3.3 -// func TestUpgradeSingleMMFiles32to33(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeSingle, api.StorageEngineMMFiles, "arangodb/arangodb:3.2.16", "arangodb/arangodb:3.3.13") -// } - -// // test upgrade single server rocksdb 3.3 -> 3.4 -// func TestUpgradeSingleRocksDB33to34(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeSingle, api.StorageEngineRocksDB, "3.3.13", "3.4.0") -// } - -/*// test upgrade active-failover server rocksdb 3.3 -> 3.4 -func TestUpgradeActiveFailoverRocksDB33to34(t *testing.T) { - upgradeSubTest(t, api.DeploymentModeActiveFailover, api.StorageEngineRocksDB, "3.3.13", "3.4.0") -}*/ - -// // test upgrade active-failover server mmfiles 3.3 -> 3.4 -// func TestUpgradeActiveFailoverMMFiles33to34(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeActiveFailover, api.StorageEngineMMFiles, "3.3.13", "3.4.0") -// } - -// test upgrade cluster rocksdb 3.2 -> 3.3 -// func TestUpgradeClusterRocksDB32to33(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB, "3.2.16", "3.3.13") -// } - -// // test upgrade cluster mmfiles 3.3 -> 3.4 -// func TestUpgradeClusterMMFiles33to34(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB, "3.3.13", "3.4.0") -// } - -// // test downgrade single server mmfiles 3.3.17 -> 3.3.16 -// func TestDowngradeSingleMMFiles3317to3316(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeSingle, api.StorageEngineMMFiles, "arangodb/arangodb:3.3.16", "arangodb/arangodb:3.3.17") -// } - -// // test downgrade ActiveFailover server rocksdb 3.3.17 -> 3.3.16 -// func TestDowngradeActiveFailoverRocksDB3317to3316(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeActiveFailover, api.StorageEngineRocksDB, "arangodb/arangodb:3.3.16", "arangodb/arangodb:3.3.17") -// } - -// // test downgrade cluster rocksdb 3.3.17 -> 3.3.16 -// func TestDowngradeClusterRocksDB3317to3316(t *testing.T) { -// upgradeSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB, "arangodb/arangodb:3.3.16", "arangodb/arangodb:3.3.17") -// } - -func TestUpgradeClusterRocksDB3322Cto346C(t *testing.T) { - runUpgradeTest(t, &upgradeTest{ - fromVersion: "3.3.22", - toVersion: "3.4.6-1", - toImageTag: "3.4.6.1", - shortTest: true, - }) -} - -func TestUpgradeClusterRocksDB3316Cto3323C(t *testing.T) { - runUpgradeTest(t, &upgradeTest{ - fromVersion: "3.3.16", - toVersion: "3.3.23", - shortTest: false, - }) -} - -func TestUpgradeClusterRocksDB346Cto347C(t *testing.T) { - runUpgradeTest(t, &upgradeTest{ - fromVersion: "3.4.6-1", - fromImageTag: "3.4.6.1", - toVersion: "3.4.7", - shortTest: true, - }) -} - -func TestUpgradeClusterRocksDB348Eto351E(t *testing.T) { - runUpgradeTest(t, &upgradeTest{ - fromVersion: "3.4.8", - toVersion: "3.5.1", - toImage: 
"arangodb/enterprise-preview", - shortTest: true, - }) -} - -type upgradeTest struct { - fromVersion string - toVersion string - - // Mode describes the deployment mode of the upgrade test, defaults to Cluster - mode api.DeploymentMode - // Engine describes the deployment storage engine, defaults to RocksDB - engine api.StorageEngine - - // fromImage describes the image of the version from which the upgrade should start, defaults to "arangodb/arangodb:" - fromImage string - fromImageTag string - - // toImage describes the image of the version to which the upgrade should start, defaults to "arangodb/arangodb:" - toImage string - toImageTag string - - toEnterprise bool - fromEnterprise bool - - name string - shortTest bool -} - -type UpgradeTest interface { - FromVersion() driver.Version - ToVersion() driver.Version - - Name() string - FromImage() string - ToImage() string - - Mode() api.DeploymentMode - Engine() api.StorageEngine - - IsShortTest() bool -} - -func (u *upgradeTest) FromImage() string { - imageName := "arangodb/arangodb" - if u.fromEnterprise { - imageName = "arangodb/enterprise" - } - if u.fromImage != "" { - imageName = u.fromImage - } - imageTag := u.fromVersion - if u.fromImageTag != "" { - imageTag = u.fromImageTag - } - return fmt.Sprintf("%s:%s", imageName, imageTag) -} - -func (u *upgradeTest) ToImage() string { - imageName := "arangodb/arangodb" - if u.toEnterprise { - imageName = "arangodb/enterprise" - } - if u.toImage != "" { - imageName = u.toImage - } - imageTag := u.toVersion - if u.toImageTag != "" { - imageTag = u.toImageTag - } - return fmt.Sprintf("%s:%s", imageName, imageTag) -} - -func (u *upgradeTest) Mode() api.DeploymentMode { - if u.mode != "" { - return u.mode - } - return api.DeploymentModeCluster -} - -func (u *upgradeTest) Engine() api.StorageEngine { - if u.engine != "" { - return u.engine - } - return api.StorageEngineRocksDB -} - -func (u *upgradeTest) Name() string { - if u.name != "" { - return u.name - } - - return strings.Replace(fmt.Sprintf("%s-to-%s", u.FromVersion(), u.ToVersion()), ".", "-", -1) -} - -func (u *upgradeTest) FromVersion() driver.Version { - return driver.Version(u.fromVersion) -} - -func (u *upgradeTest) ToVersion() driver.Version { - return driver.Version(u.toVersion) -} - -func (u *upgradeTest) IsShortTest() bool { - return u.shortTest -} - -func runUpgradeTest(t *testing.T, spec UpgradeTest) { - if !spec.IsShortTest() { - longOrSkip(t) - } - - ns := getNamespace(t) - kubecli := mustNewKubeClient(t) - c := kubeArangoClient.MustNewClient() - - depl := newDeployment(fmt.Sprintf("tu-%s-%s", spec.Name(), uniuri.NewLen(4))) - depl.Spec.Mode = api.NewMode(spec.Mode()) - depl.Spec.StorageEngine = api.NewStorageEngine(spec.Engine()) - depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert - depl.Spec.Image = util.NewString(spec.FromImage()) - depl.Spec.SetDefaults(depl.GetName()) // this must be last - - // Create deployment - _, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - - // Wait for deployment to be ready - deployment, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - // Create a database client - ctx := context.Background() - DBClient := mustNewArangodDatabaseClient(ctx, kubecli, deployment, t, nil) - if err := 
waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, spec.FromVersion()); err != nil { - t.Fatalf("Deployment not healthy in time: %v", err) - } - - // Try to change image version - _, err = updateDeployment(c, depl.GetName(), ns, - func(depl *api.DeploymentSpec) { - depl.Image = util.NewString(spec.ToImage()) - }) - if err != nil { - t.Fatalf("Failed to upgrade the image from %s to %s: %v", spec.FromImage(), spec.ToImage(), err) - } else { - t.Log("Updated deployment") - } - - if err := waitUntilClusterVersionUp(DBClient, spec.ToVersion()); err != nil { - t.Errorf("Deployment did not reach new version in time: %v", err) - } else { - t.Log("Deployment healthy") - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -} diff --git a/tests/version_test.go b/tests/version_test.go deleted file mode 100644 index 14bcc77a7..000000000 --- a/tests/version_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package tests - -import ( - "context" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" - - "github.com/arangodb/go-driver" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/arangodb/kube-arangodb/pkg/client" - "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/arangodb/kube-arangodb/pkg/util/arangod" - "github.com/dchest/uniuri" -) - -func TestIsVersionSet(t *testing.T) { - longOrSkip(t) - c := client.MustNewClient() - kubecli := mustNewKubeClient(t) - ns := getNamespace(t) - - expectedVersion := driver.Version("3.3.17") - // Prepare deployment config - depl := newDeployment("test-auth-sng-def-" + uniuri.NewLen(4)) - depl.Spec.Mode = api.NewMode(api.DeploymentModeSingle) - depl.Spec.SetDefaults(depl.GetName()) - depl.Spec.Image = util.NewString("arangodb/arangodb:" + string(expectedVersion)) - // Create deployment - apiObject, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("Create deployment failed: %v", err) - } - defer deferedCleanupDeployment(c, depl.GetName(), ns) - // Wait for deployment to be ready - depl, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady()) - if err != nil { - t.Fatalf("Deployment not running in time: %v", err) - } - - single := depl.Status.Members.Single - if len(single) == 0 { - t.Fatalf("single member is empty") - } - - if single[0].ArangoVersion.CompareTo(expectedVersion) != 0 { - t.Fatalf("version %s has not been set for the single member status", expectedVersion) - } - - // Create a database client - ctx := arangod.WithRequireAuthentication(context.Background()) - client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t, nil) - - // Wait for the single server to become available with a valid database version - err = waitUntilVersionUp(client, func(version driver.VersionInfo) error { - if version.Version.CompareTo(expectedVersion) != 0 { - t.Fatalf("database version %s is not equal to the expected version %s", version.Version, expectedVersion) - } - return nil - }) - if err != nil { - t.Fatalf("Single server not returning version in time: %v", err) - } - - // Cleanup - removeDeployment(c, depl.GetName(), ns) -}