diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..a3aab7af --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. +bin/ diff --git a/.gitignore b/.gitignore index 3e0efa51..11512043 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ __pycache__/* */.ipynb_checkpoints/* .DS_Store .tags +bin # Project files .ropeproject diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..aac8a13f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,47 @@ +run: + timeout: 5m + allow-parallel-runners: true + +issues: + # don't skip warning about doc comments + # don't exclude the default set of lint + exclude-use-default: false + # restore some of the defaults + # (fill in the rest as needed) + exclude-rules: + - path: "api/*" + linters: + - lll + - path: "internal/*" + linters: + - dupl + - lll +linters: + disable-all: true + enable: + - dupl + - errcheck + - exportloopref + - ginkgolinter + - goconst + - gocyclo + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + +linters-settings: + revive: + rules: + - name: comment-spacings diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..4ba18b68 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,33 @@ +# Build the manager binary +FROM golang:1.22 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +COPY api/ api/ +COPY internal/ internal/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile index 69fe55ec..1b58704e 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,202 @@ -# Minimal makefile for Sphinx documentation -# +# Image URL to use all building/pushing image targets +IMG ?= registry.nordix.org/eiffel/etos-controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.30.0 -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SOURCEDIR = source -BUILDDIR = build +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif -# Put it first so that "make" without argument is like "make help". 
-help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker -.PHONY: help Makefile +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. +.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. +test-e2e: + go test ./test/e2e/ -v -ginkgo.v + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). 
However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name etos-builder + $(CONTAINER_TOOL) buildx use etos-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm etos-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +.PHONY: split-installer +split-installer: build-installer + python scripts/split_installer.py dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) +ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.4.1 +CONTROLLER_TOOLS_VERSION ?= v0.15.0 +ENVTEST_VERSION ?= release-0.18 +GOLANGCI_LINT_VERSION ?= v1.57.2 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary (ideally with version) +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f $(1) ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ +} +endef diff --git a/PROJECT b/PROJECT new file mode 100644 index 00000000..5663972c --- /dev/null +++ b/PROJECT @@ -0,0 +1,65 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html +domain: eiffel-community.github.io +layout: +- go.kubebuilder.io/v4 +projectName: etos +repo: github.com/eiffel-community/etos +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: eiffel-community.github.io + group: etos + kind: TestRun + path: github.com/eiffel-community/etos/api/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: eiffel-community.github.io + group: etos + kind: Provider + path: github.com/eiffel-community/etos/api/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: eiffel-community.github.io + group: etos + kind: Environment + path: github.com/eiffel-community/etos/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: eiffel-community.github.io + group: etos + kind: Cluster + path: github.com/eiffel-community/etos/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: eiffel-community.github.io + group: etos + kind: EnvironmentRequest + path: github.com/eiffel-community/etos/api/v1alpha1 + version: v1alpha1 + webhooks: + defaulting: true + webhookVersion: v1 +version: "3" diff --git a/README.rst b/README.md similarity index 100% rename from README.rst rename to README.md diff --git a/api/v1alpha1/cluster_types.go b/api/v1alpha1/cluster_types.go new file mode 100644 index 00000000..61301c81 --- /dev/null +++ b/api/v1alpha1/cluster_types.go @@ -0,0 +1,216 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type MongoDB struct { + // +kubebuilder:default=false + // +optional + Deploy bool `json:"deploy"` + // Ignored if deploy is true + // +kubebuilder:default={"value": "mongodb://root:password@mongodb:27017/admin"} + // +optional + URI Var `json:"uri"` +} + +type EventRepository struct { + // Deploy a local event repository for a cluster. + // +kubebuilder:default=false + // +optional + Deploy bool `json:"deploy"` + + // We do not build the GraphQL API automatically nor publish it remotely. + // This will need to be provided to work. + // +kubebuilder:default={"image": "registry.nordix.org/eiffel/eiffel-graphql-api:latest"} + // +optional + API Image `json:"api"` + + // We do not build the GraphQL API automatically nor publish it remotely. + // This will need to be provided to work. 
+ // +kubebuilder:default={"image": "registry.nordix.org/eiffel/eiffel-graphql-storage:latest"} + // +optional + Storage Image `json:"storage"` + + // +kubebuilder:default={} + // +optional + Database MongoDB `json:"mongo"` + // +kubebuilder:default="eventrepository" + // +optional + Host string `json:"host"` + // +kubebuilder:default={} + // +optional + Ingress Ingress `json:"ingress"` +} + +type MessageBus struct { + // +kubebuilder:default={"queueName": "etos"} + // +optional + EiffelMessageBus RabbitMQ `json:"eiffel"` + // +kubebuilder:default={"queueName": "etos-*-temp"} + // +optional + ETOSMessageBus RabbitMQ `json:"logs"` +} + +type Etcd struct { + // Parameter is ignored if Deploy is set to true. + // +kubebuilder:default="etcd-client" + // +optional + Host string `json:"host"` + // Parameter is ignored if Deploy is set to true. + // +kubebuilder:default="2379" + // +optional + Port string `json:"port"` +} + +type Database struct { + // +kubebuilder:default=true + // +optional + Deploy bool `json:"deploy"` + // +kubebuilder:default={} + // +optional + Etcd Etcd `json:"etcd"` +} + +type ETOSAPI struct { + Image `json:",inline"` +} + +type ETOSSSE struct { + Image `json:",inline"` +} + +type ETOSLogArea struct { + Image `json:",inline"` +} + +type ETOSSuiteRunner struct { + Image `json:",inline"` + LogListener Image `json:"logListener"` +} + +type ETOSTestRunner struct { + Version string `json:"version"` +} + +type ETOSEnvironmentProvider struct { + Image `json:",inline"` +} + +type ETOSConfig struct { + // +kubebuilder:default="true" + // +optional + Dev string `json:"dev"` + // +kubebuilder:default="60" + // +optional + EventDataTimeout string `json:"eventDataTimeout"` + // +kubebuilder:default="10" + // +optional + TestSuiteTimeout string `json:"testSuiteTimeout"` + // +kubebuilder:default="3600" + // +optional + EnvironmentTimeout string `json:"environmentTimeout"` + // +kubebuilder:default="ETOS" + // +optional + Source string `json:"source"` + + // +optional + TestRunRetention Retention `json:"testrunRetention,omitempty"` + + // +kubebuilder:default={"value": ""} + EncryptionKey Var `json:"encryptionKey"` + + ETOSApiURL string `json:"etosApiURL,omitempty"` + ETOSEventRepositoryURL string `json:"etosEventRepositoryURL,omitempty"` + + // +kubebuilder:default="etos" + // +optional + RoutingKeyTag string `json:"routingKeyTag"` + + Timezone string `json:"timezone,omitempty"` +} + +type ETOS struct { + // +kubebuilder:default={"image": "registry.nordix.org/eiffel/etos-api:latest"} + // +optional + API ETOSAPI `json:"api"` + // +kubebuilder:default={"image": "registry.nordix.org/eiffel/etos-sse:latest"} + // +optional + SSE ETOSSSE `json:"sse"` + // +kubebuilder:default={"image": "registry.nordix.org/eiffel/etos-log-area:latest"} + // +optional + LogArea ETOSLogArea `json:"logArea"` + // +kubebuilder:default={"image": "registry.nordix.org/eiffel/etos-suite-runner:latest", "logListener": {"image": "registry.nordix.org/eiffel/etos-log-listener:latest"}} + // +optional + SuiteRunner ETOSSuiteRunner `json:"suiteRunner"` + // +kubebuilder:default={"version": "latest"} + // +optional + TestRunner ETOSTestRunner `json:"testRunner"` + // +kubebuilder:default={"image": "registry.nordix.org/eiffel/etos-environment-provider:latest"} + // +optional + EnvironmentProvider ETOSEnvironmentProvider `json:"environmentProvider"` + Ingress Ingress `json:"ingress,omitempty"` + // +kubebuilder:default={"encryptionKey": {"value": ""}} + // +optional + Config ETOSConfig `json:"config"` +} + +// ClusterSpec 
defines the desired state of Cluster +type ClusterSpec struct { + // +kubebuilder:default={} + ETOS ETOS `json:"etos"` + // +kubebuilder:default={} + Database Database `json:"database"` + // +kubebuilder:default={} + MessageBus MessageBus `json:"messageBus"` + // +kubebuilder:default={} + EventRepository EventRepository `json:"eventRepository"` +} + +// ClusterStatus defines the observed state of Cluster +type ClusterStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the clusters API +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message" +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSpec `json:"spec,omitempty"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Cluster +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go new file mode 100644 index 00000000..9e520606 --- /dev/null +++ b/api/v1alpha1/common_types.go @@ -0,0 +1,133 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "context" + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// VarSource describes a value from either a secretmap or configmap. +type VarSource struct { + ConfigMapKeyRef *corev1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty"` + SecretKeyRef *corev1.SecretKeySelector `json:"secretKeyRef,omitempty"` +} + +// Var describes either a string value or a value from a VarSource. +type Var struct { + Value string `json:"value,omitempty"` + ValueFrom VarSource `json:"valueFrom,omitempty"` +} + +// getFromSecretKeySelector returns the value of a key in a secret. 
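// Illustrative sketch, not part of this change: resolving a Var either from a
// literal value or from a referenced Secret. The secret name and key below are
// hypothetical, and "c" is any controller-runtime client; Get (defined further
// down) prefers Value, then SecretKeyRef, then ConfigMapKeyRef.
func exampleResolveVar(ctx context.Context, c client.Client) ([]byte, error) {
	literal := Var{Value: "guest"}
	if v, err := literal.Get(ctx, c, "etos"); err == nil {
		_ = v // "guest"; no API call is made for literal values
	}

	fromSecret := Var{
		ValueFrom: VarSource{
			SecretKeyRef: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "rabbitmq-credentials"},
				Key:                  "password",
			},
		},
	}
	// Value is empty, so Get falls through to the secret lookup.
	return fromSecret.Get(ctx, c, "etos")
}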
+func (v *Var) getFromSecretKeySelector(ctx context.Context, client client.Client, secretKeySelector *corev1.SecretKeySelector, namespace string) ([]byte, error) { + name := types.NamespacedName{Name: secretKeySelector.Name, Namespace: namespace} + obj := &corev1.Secret{} + err := client.Get(ctx, name, obj) + if err != nil { + return nil, err + } + d, ok := obj.Data[secretKeySelector.Key] + if !ok { + return nil, fmt.Errorf("%s does not exist in secret %s/%s", secretKeySelector.Key, secretKeySelector.Name, namespace) + } + return d, nil +} + +// getFromConfigMapKeySelector returns the value of a key in a configmap. +func (v *Var) getFromConfigMapKeySelector(ctx context.Context, client client.Client, configMapKeySelector *corev1.ConfigMapKeySelector, namespace string) ([]byte, error) { + name := types.NamespacedName{Name: configMapKeySelector.Name, Namespace: namespace} + obj := &corev1.ConfigMap{} + err := client.Get(ctx, name, obj) + if err != nil { + return nil, err + } + d, ok := obj.Data[configMapKeySelector.Key] + if !ok { + return nil, fmt.Errorf("%s does not exist in configmap %s/%s", configMapKeySelector.Key, configMapKeySelector.Name, namespace) + } + return []byte(d), nil +} + +// Get the value from a Var struct. Either through the Value key, secret or configmap. +func (v *Var) Get(ctx context.Context, client client.Client, namespace string) ([]byte, error) { + if v.Value != "" { + return []byte(v.Value), nil + } + if v.ValueFrom.SecretKeyRef != nil { + return v.getFromSecretKeySelector(ctx, client, v.ValueFrom.SecretKeyRef, namespace) + } + if v.ValueFrom.ConfigMapKeyRef != nil { + return v.getFromConfigMapKeySelector(ctx, client, v.ValueFrom.ConfigMapKeyRef, namespace) + } + return nil, errors.New("found no source for key") +} + +// Image configuration. +type Image struct { + Image string `json:"image"` + + // +kubebuilder:default="IfNotPresent" + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy"` +} + +// Ingress configuration. +type Ingress struct { + // +kubebuilder:default=false + // +optional + Enabled bool `json:"enabled"` + IngressClass string `json:"ingressClass,omitempty"` + // +kubebuilder:default="" + Host string `json:"host,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// RabbitMQ configuration. +type RabbitMQ struct { + // +kubebuilder:default=false + // +optional + Deploy bool `json:"deploy"` + // +kubebuilder:default="rabbitmq" + // +optional + Host string `json:"host"` + // +kubebuilder:default="amq.topic" + // +optional + Exchange string `json:"exchange"` + // +kubebuilder:default={"value": "guest"} + // +optional + Password *Var `json:"password,omitempty"` + // +kubebuilder:default="guest" + // +optional + Username string `json:"username,omitempty"` + // +kubebuilder:default="5672" + // +optional + Port string `json:"port"` + // +kubebuilder:default="false" + // +optional + SSL string `json:"ssl"` + // +kubebuilder:default=/ + // +optional + Vhost string `json:"vhost"` + QueueName string `json:"queueName,omitempty"` + QueueParams string `json:"queueParams,omitempty"` +} diff --git a/api/v1alpha1/environment_types.go b/api/v1alpha1/environment_types.go new file mode 100644 index 00000000..f7c69867 --- /dev/null +++ b/api/v1alpha1/environment_types.go @@ -0,0 +1,83 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnvironmentSpec defines the desired state of Environment +type EnvironmentSpec struct { + Name string `json:"name"` + + // Snake casing as to be compatible with ETR. + + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + SuiteID string `json:"suite_id"` + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + SubSuiteID string `json:"sub_suite_id"` + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + MainSuiteID string `json:"test_suite_started_id"` + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + Artifact string `json:"artifact"` + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + Context string `json:"context"` + + Priority int `json:"priority"` + Tests []Test `json:"recipes"` + TestRunner string `json:"test_runner"` + Iut *apiextensionsv1.JSON `json:"iut"` + Executor *apiextensionsv1.JSON `json:"executor"` + LogArea *apiextensionsv1.JSON `json:"log_area"` +} + +// EnvironmentStatus defines the observed state of Environment +type EnvironmentStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + EnvironmentReleasers []corev1.ObjectReference `json:"environmentReleasers,omitempty"` + + CompletionTime *metav1.Time `json:"completionTime,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Environment is the Schema for the environments API +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status" +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].reason" +type Environment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EnvironmentSpec `json:"spec,omitempty"` + Status EnvironmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EnvironmentList contains a list of Environment +type EnvironmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Environment `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Environment{}, &EnvironmentList{}) +} diff --git a/api/v1alpha1/environmentrequest_types.go b/api/v1alpha1/environmentrequest_types.go new file mode 100644 index 00000000..7aa9091d --- /dev/null +++ b/api/v1alpha1/environmentrequest_types.go @@ -0,0 +1,104 @@ +// Copyright Axis Communications AB. 
+// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type IutProvider struct { + ID string `json:"id"` +} + +type LogAreaProvider struct { + ID string `json:"id"` +} + +type ExecutionSpaceProvider struct { + ID string `json:"id"` + TestRunner string `json:"testRunner"` +} + +type EnvironmentProviders struct { + IUT IutProvider `json:"iut,omitempty"` + ExecutionSpace ExecutionSpaceProvider `json:"executionSpace,omitempty"` + LogArea LogAreaProvider `json:"logArea,omitempty"` +} + +type Splitter struct { + Tests []Test `json:"tests"` +} + +// EnvironmentRequestSpec defines the desired state of EnvironmentRequest +type EnvironmentRequestSpec struct { + // ID is the ID for the environments generated. Will be generated if nil + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + + *Image `json:",inline"` + Identifier string `json:"identifier,omitempty"` + Artifact string `json:"artifact,omitempty"` + Identity string `json:"identity,omitempty"` + MinimumAmount int `json:"minimumAmount"` + MaximumAmount int `json:"maximumAmount"` + // TODO: Dataset per provider? 
+ Dataset *apiextensionsv1.JSON `json:"dataset,omitempty"` + + Providers EnvironmentProviders `json:"providers"` + Splitter Splitter `json:"splitter"` +} + +// EnvironmentRequestStatus defines the observed state of EnvironmentRequest +type EnvironmentRequestStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + EnvironmentProviders []corev1.ObjectReference `json:"environmentProviders,omitempty"` + + StartTime *metav1.Time `json:"startTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EnvironmentRequest is the Schema for the environmentrequests API +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status" +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].reason" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message" +type EnvironmentRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EnvironmentRequestSpec `json:"spec,omitempty"` + Status EnvironmentRequestStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EnvironmentRequestList contains a list of EnvironmentRequest +type EnvironmentRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EnvironmentRequest `json:"items"` +} + +func init() { + SchemeBuilder.Register(&EnvironmentRequest{}, &EnvironmentRequestList{}) +} diff --git a/api/v1alpha1/environmentrequest_webhook.go b/api/v1alpha1/environmentrequest_webhook.go new file mode 100644 index 00000000..249e93c6 --- /dev/null +++ b/api/v1alpha1/environmentrequest_webhook.go @@ -0,0 +1,47 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/util/uuid" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var environmentrequestlog = logf.Log.WithName("environmentrequest-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *EnvironmentRequest) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:path=/mutate-etos-eiffel-community-github-io-v1alpha1-environmentrequest,mutating=true,failurePolicy=fail,sideEffects=None,groups=etos.eiffel-community.github.io,resources=environmentrequests,verbs=create;update,versions=v1alpha1,name=menvironmentrequest.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &EnvironmentRequest{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *EnvironmentRequest) Default() { + environmentrequestlog.Info("default", "name", r.Name) + + if r.Spec.ID == "" { + r.Spec.ID = string(uuid.NewUUID()) + } +} diff --git a/api/v1alpha1/environmentrequest_webhook_test.go b/api/v1alpha1/environmentrequest_webhook_test.go new file mode 100644 index 00000000..3f62dd77 --- /dev/null +++ b/api/v1alpha1/environmentrequest_webhook_test.go @@ -0,0 +1,33 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("EnvironmentRequest Webhook", func() { + + Context("When creating EnvironmentRequest under Defaulting Webhook", func() { + It("Should fill in the default value if a required field is empty", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..b14b3e93 --- /dev/null +++ b/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha1 contains API Schema definitions for the etos v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=etos.eiffel-community.github.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "etos.eiffel-community.github.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha1/provider_types.go b/api/v1alpha1/provider_types.go new file mode 100644 index 00000000..32c95c7c --- /dev/null +++ b/api/v1alpha1/provider_types.go @@ -0,0 +1,144 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// JSONTasList is the List command in the JSONTas provider. +type JSONTasList struct { + Possible *apiextensionsv1.JSON `json:"possible"` + Available *apiextensionsv1.JSON `json:"available"` +} + +// Stage is the definition of a stage where to execute steps. +type Stage struct { + // +kubebuilder:default={} + // +optional + Steps *apiextensionsv1.JSON `json:"steps"` +} + +// JSONTasIUTPrepareStages defines the preparation stages required for an IUT. +type JSONTasPrepareStages struct { + // Underscore used in these due to backwards compatibility + EnvironmentProvider Stage `json:"environment_provider"` + SuiteRunner Stage `json:"suite_runner"` + TestRunner Stage `json:"test_runner"` +} + +// JSONTasIUTPrepare defines the preparation required for an IUT. +type JSONTasIUTPrepare struct { + Stages JSONTasPrepareStages `json:"stages"` +} + +// JSONTasIut is the IUT provider definition for the JSONTas provider. +type JSONTasIut struct { + ID string `json:"id"` + Checkin *apiextensionsv1.JSON `json:"checkin,omitempty"` + Checkout *apiextensionsv1.JSON `json:"checkout,omitempty"` + List JSONTasList `json:"list"` + Prepare JSONTasIUTPrepare `json:"prepare,omitempty"` +} + +// JSONTasExecutionSpace is the execution space provider definition for the JSONTas provider +type JSONTasExecutionSpace struct { + ID string `json:"id"` + Checkin *apiextensionsv1.JSON `json:"checkin,omitempty"` + Checkout *apiextensionsv1.JSON `json:"checkout,omitempty"` + List JSONTasList `json:"list"` +} + +// JSONTasLogArea is the log area provider definition for the JSONTas provider +type JSONTasLogArea struct { + ID string `json:"id"` + Checkin *apiextensionsv1.JSON `json:"checkin,omitempty"` + Checkout *apiextensionsv1.JSON `json:"checkout,omitempty"` + List JSONTasList `json:"list"` +} + +// JSONTas defines the definitions that a JSONTas provider shall use. +type JSONTas struct { + Image string `json:"image,omitempty"` + // These are pointers so that they become nil in the Provider object in Kubernetes + // and don't muddle up the yaml with empty data. + Iut *JSONTasIut `json:"iut,omitempty"` + ExecutionSpace *JSONTasExecutionSpace `json:"execution_space,omitempty"` + LogArea *JSONTasLogArea `json:"log,omitempty"` +} + +// Healthcheck defines the health check endpoint and interval for providers. +// The defaults of this should work most of the time. 
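// Illustrative sketch, not part of this change: a minimal JSONTas document for
// an IUT provider built in Go. The raw JSON payloads are placeholders only;
// the real checkout/list rules come from the JSONTas definitions used by the
// environment provider.
func exampleJSONTasIut() *JSONTas {
	raw := func(s string) *apiextensionsv1.JSON { return &apiextensionsv1.JSON{Raw: []byte(s)} }
	return &JSONTas{
		Iut: &JSONTasIut{
			ID:       "default-iut",
			Checkout: raw(`{}`), // placeholder JSONTas rule
			List: JSONTasList{
				Possible:  raw(`{}`), // placeholder JSONTas rule
				Available: raw(`{}`), // placeholder JSONTas rule
			},
		},
	}
}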
+type Healthcheck struct { + // +kubebuilder:default=/v1alpha1/selftest/ping + // +optional + Endpoint string `json:"endpoint"` + // +kubebuilder:default=30 + // +optional + IntervalSeconds int `json:"intervalSeconds"` +} + +// ProviderSpec defines the desired state of Provider +type ProviderSpec struct { + // +kubebuilder:validation:Enum=execution-space;iut;log-area + Type string `json:"type"` + // +optional + Host string `json:"host,omitempty"` + + // +kubebuilder:default={} + // +optional + Healthcheck *Healthcheck `json:"healthCheck,omitempty"` + + // These are pointers so that they become nil in the Provider object in Kubernetes + // and don't muddle up the yaml with empty data. + JSONTas *JSONTas `json:"jsontas,omitempty"` + JSONTasSource *VarSource `json:"jsontasSource,omitempty"` +} + +// ProviderStatus defines the observed state of Provider +type ProviderStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Provider is the Schema for the providers API +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status" +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].reason" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message" +type Provider struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ProviderSpec `json:"spec,omitempty"` + Status ProviderStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProviderList contains a list of Provider +type ProviderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Provider `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Provider{}, &ProviderList{}) +} diff --git a/api/v1alpha1/provider_webhook.go b/api/v1alpha1/provider_webhook.go new file mode 100644 index 00000000..1fd21de1 --- /dev/null +++ b/api/v1alpha1/provider_webhook.go @@ -0,0 +1,203 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
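// Illustrative sketch, not part of this change: a Provider that points at a
// ConfigMap holding its JSONTas definition instead of inlining it. The
// ConfigMap name and key are hypothetical; the defaulting webhook below
// resolves Spec.JSONTasSource and writes the result into Spec.JSONTas.
func exampleProvider() *Provider {
	return &Provider{
		ObjectMeta: metav1.ObjectMeta{Name: "default-iut", Namespace: "etos"},
		Spec: ProviderSpec{
			Type: "iut",
			JSONTasSource: &VarSource{
				ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{Name: "iut-provider"},
					Key:                  "provider.json",
				},
			},
		},
	}
}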
+ +package v1alpha1 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var ( + providerlog = logf.Log.WithName("provider-resource") + cli client.Client +) + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *Provider) SetupWebhookWithManager(mgr ctrl.Manager) error { + if cli == nil { + cli = mgr.GetClient() + } + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// getFromSecretKeySelector returns the value of a key in a secret. +func getFromSecretKeySelector(ctx context.Context, client client.Client, secretKeySelector *corev1.SecretKeySelector, namespace string) ([]byte, error) { + name := types.NamespacedName{Name: secretKeySelector.Name, Namespace: namespace} + obj := &corev1.Secret{} + + providerlog.Info("Getting jsontas from a secret", "name", secretKeySelector.Name, "key", secretKeySelector.Key) + + // Retrying to make sure that the secret has been properly generated before a provider is applied. + // There is a race where, for example, a provider and a custom secret resource (such as a SealedSecret) + // are created at the same time and the secret does not get generated in time. + err := retry.OnError(retry.DefaultRetry, apierrors.IsNotFound, func() error { + err := client.Get(ctx, name, obj) + if err != nil { + providerlog.Error(err, "retry") + return err + } + return nil + }) + if err != nil { + return nil, err + } + d, ok := obj.Data[secretKeySelector.Key] + if !ok { + return nil, fmt.Errorf("%s does not exist in secret %s/%s", secretKeySelector.Key, secretKeySelector.Name, namespace) + } + return d, nil +} + +// getFromConfigMapKeySelector returns the value of a key in a configmap. +func getFromConfigMapKeySelector(ctx context.Context, client client.Client, configMapKeySelector *corev1.ConfigMapKeySelector, namespace string) ([]byte, error) { + name := types.NamespacedName{Name: configMapKeySelector.Name, Namespace: namespace} + obj := &corev1.ConfigMap{} + + // Retrying to make sure that the configmap has been properly generated before a provider is applied. + // There is a race where, for example, a provider and a custom configmap resource are created at the + // same time and the configmap does not get generated in time. + err := retry.OnError(retry.DefaultRetry, apierrors.IsNotFound, func() error { + return client.Get(ctx, name, obj) + }) + if err != nil { + return nil, err + } + d, ok := obj.Data[configMapKeySelector.Key] + if !ok { + return nil, fmt.Errorf("%s does not exist in configmap %s/%s", configMapKeySelector.Key, configMapKeySelector.Name, namespace) + } + return []byte(d), nil +} + +// Get the value from a secret or configmap ref. 
+func (r *Provider) Get(ctx context.Context, client client.Client, namespace string) ([]byte, error) { + if r.Spec.JSONTasSource.SecretKeyRef != nil { + return getFromSecretKeySelector(ctx, client, r.Spec.JSONTasSource.SecretKeyRef, namespace) + } + if r.Spec.JSONTasSource.ConfigMapKeyRef != nil { + return getFromConfigMapKeySelector(ctx, client, r.Spec.JSONTasSource.ConfigMapKeyRef, namespace) + } + return nil, errors.New("found no source for key") +} + +// +kubebuilder:webhook:path=/mutate-etos-eiffel-community-github-io-v1alpha1-provider,mutating=true,failurePolicy=fail,sideEffects=None,groups=etos.eiffel-community.github.io,resources=providers,verbs=create;update,versions=v1alpha1,name=mprovider.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &Provider{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *Provider) Default() { + providerlog.Info("default", "name", r.Name) + + if r.Spec.JSONTasSource == nil { + return + } + + jsontasBytes, err := r.Get(context.TODO(), cli, r.Namespace) + if err != nil { + providerlog.Error(err, "failed to get jsontas from provider") + return + } + providerlog.Info("Unmarshalling jsontasbytes", "jsontas", string(jsontasBytes)) + jsontas := &JSONTas{} + if err := json.Unmarshal(jsontasBytes, jsontas); err != nil { + providerlog.Error(err, "failed to unmarshal jsontas from provider") + return + } + providerlog.Info("Done", "jsontas", jsontas) + r.Spec.Healthcheck = nil // Not sure about this one + r.Spec.Host = "" // Not sure about this one + r.Spec.JSONTasSource = nil // Not sure about this one + r.Spec.JSONTas = jsontas +} + +// validate the spec of a Provider object. +func (r *Provider) validate() error { + var allErrs field.ErrorList + groupVersionKind := r.GroupVersionKind() + if r.Spec.JSONTas != nil && r.Spec.JSONTasSource != nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("jsonTas"), + r.Spec.JSONTas, + "only one of jsonTas and jsonTasSource is allowed")) + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("jsonTasSource"), + r.Spec.JSONTasSource, + "only one of jsonTas and jsonTasSource is allowed")) + } + if r.Spec.JSONTas == nil && r.Spec.JSONTasSource == nil { + if r.Spec.Host == "" { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("host"), + r.Spec.Host, + "host must be set when JSONTas is not")) + } + if r.Spec.Healthcheck == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("healthCheck"), + r.Spec.Healthcheck, + "healthCheck must be set when JSONTas is not")) + } + } + + if len(allErrs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: groupVersionKind.Group, Kind: groupVersionKind.Kind}, + r.Name, allErrs, + ) + } + return nil +} + +// +kubebuilder:webhook:path=/validate-etos-eiffel-community-github-io-v1alpha1-provider,mutating=false,failurePolicy=fail,sideEffects=None,groups=etos.eiffel-community.github.io,resources=providers,verbs=create;update,versions=v1alpha1,name=mprovider.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &Provider{} + +// ValidateCreate validates the creation of a Provider. +func (r *Provider) ValidateCreate() (admission.Warnings, error) { + providerlog.Info("validate create", "name", r.Name) + return nil, r.validate() +} + +// ValidateUpdate validates the updates of a Provider. 
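// Illustrative sketch, not part of this change: what the validation above
// rejects. A Provider may set either JSONTas or JSONTasSource, but not both;
// if neither is set it must describe an external provider via host and
// healthCheck instead.
func exampleValidation() error {
	p := &Provider{
		ObjectMeta: metav1.ObjectMeta{Name: "broken"},
		Spec: ProviderSpec{
			Type:          "log-area",
			JSONTas:       &JSONTas{},
			JSONTasSource: &VarSource{},
		},
	}
	// Returns an invalid-field error naming both spec.jsonTas and spec.jsonTasSource.
	_, err := p.ValidateCreate()
	return err
}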
+func (r *Provider) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + providerlog.Info("validate update", "name", r.Name) + return nil, r.validate() +} + +// ValidateDelete validates the deletion of a Provider. +func (r *Provider) ValidateDelete() (admission.Warnings, error) { + providerlog.Info("validate delete", "name", r.Name) + return nil, nil +} diff --git a/api/v1alpha1/provider_webhook_test.go b/api/v1alpha1/provider_webhook_test.go new file mode 100644 index 00000000..00168630 --- /dev/null +++ b/api/v1alpha1/provider_webhook_test.go @@ -0,0 +1,33 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + . "github.com/onsi/ginkgo/v2" +) + +var _ = Describe("Provider Webhook", func() { + + Context("When creating Provider under Defaulting Webhook", func() { + It("Should fill in the default value if a required field is empty", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha1/testrun_types.go b/api/v1alpha1/testrun_types.go new file mode 100644 index 00000000..d04781e0 --- /dev/null +++ b/api/v1alpha1/testrun_types.go @@ -0,0 +1,168 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Providers to use for test execution. These names must correspond to existing +// Provider kinds in the namespace where a testrun is created. +type Providers struct { + IUT string `json:"iut"` + LogArea string `json:"logArea"` + ExecutionSpace string `json:"executionSpace"` +} + +// TestCase metadata. +type TestCase struct { + ID string `json:"id"` + Version string `json:"version,omitempty"` + Tracker string `json:"tracker,omitempty"` + URI string `json:"uri,omitempty"` +} + +// Execution describes how to execute a testCase. +type Execution struct { + Checkout []string `json:"checkout"` + Parameters map[string]string `json:"parameters"` + Environment map[string]string `json:"environment"` + Command string `json:"command"` + Execute []string `json:"execute,omitempty"` + TestRunner string `json:"testRunner"` +} + +// TestEnvironment to run tests within. 
+type TestEnvironment struct{} + +type Test struct { + ID string `json:"id"` + TestCase TestCase `json:"testCase"` + Execution Execution `json:"execution"` + Environment TestEnvironment `json:"environment"` +} + +// Suite to execute. +type Suite struct { + // Name of the test suite. + Name string `json:"name"` + + // Priority to execute the test suite. + // +kubebuilder:default=1 + Priority int `json:"priority"` + + // Tests to execute as part of this suite. + Tests []Test `json:"tests"` + + // Dataset for this suite. + Dataset *apiextensionsv1.JSON `json:"dataset"` +} + +type TestRunner struct { + Version string `json:"version"` +} + +type SuiteRunner struct { + *Image `json:",inline"` +} + +type LogListener struct { + *Image `json:",inline"` +} + +type EnvironmentProvider struct { + *Image `json:",inline"` +} + +// Retention describes the failure and success retentions for testruns. +type Retention struct { + // +optional + Failure *metav1.Duration `json:"failure,omitempty"` + // +optional + Success *metav1.Duration `json:"success,omitempty"` +} + +// TestRunSpec defines the desired state of TestRun +type TestRunSpec struct { + // Name of the ETOS cluster to execute the testrun in. + Cluster string `json:"cluster,omitempty"` + + // ID is the test suite ID for this execution. Will be generated if nil + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + ID string `json:"id,omitempty"` + + // Artifact is the ID of the software under test. + // +kubebuilder:validation:Pattern="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}" + Artifact string `json:"artifact"` + + // +optional + Retention Retention `json:"retention,omitempty"` + + SuiteRunner *SuiteRunner `json:"suiteRunner,omitempty"` + TestRunner *TestRunner `json:"testRunner,omitempty"` + LogListener *LogListener `json:"logListener,omitempty"` + EnvironmentProvider *EnvironmentProvider `json:"environmentProvider,omitempty"` + Identity string `json:"identity"` + Providers Providers `json:"providers"` + Suites []Suite `json:"suites"` +} + +// TestRunStatus defines the observed state of TestRun +type TestRunStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + SuiteRunners []corev1.ObjectReference `json:"suiteRunners,omitempty"` + EnvironmentRequests []corev1.ObjectReference `json:"environmentRequests,omitempty"` + + StartTime *metav1.Time `json:"startTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + Verdict string `json:"verdict,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TestRun is the Schema for the testruns API +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Environment",type="string",JSONPath=".status.conditions[?(@.type==\"Environment\")].reason" +// +kubebuilder:printcolumn:name="Suiterunner",type="string",JSONPath=".status.conditions[?(@.type==\"SuiteRunner\")].reason" +// +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status" +// +kubebuilder:printcolumn:name="Verdict",type="string",JSONPath=".status.verdict" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].message" +// 
+kubebuilder:printcolumn:name="ID",type="string",JSONPath=.metadata.labels.etos\.eiffel-community\.github\.io/id +type TestRun struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TestRunSpec `json:"spec,omitempty"` + Status TestRunStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TestRunList contains a list of TestRun +type TestRunList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TestRun `json:"items"` +} + +func init() { + SchemeBuilder.Register(&TestRun{}, &TestRunList{}) +} diff --git a/api/v1alpha1/testrun_webhook.go b/api/v1alpha1/testrun_webhook.go new file mode 100644 index 00000000..7109710b --- /dev/null +++ b/api/v1alpha1/testrun_webhook.go @@ -0,0 +1,171 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// testrunlog is for logging in this package. +var testrunlog = logf.Log.WithName("testrun-resource") + +// SetupWebhookWithManager will setup the manager to manage the webhooks +func (r *TestRun) SetupWebhookWithManager(mgr ctrl.Manager) error { + if cli == nil { + cli = mgr.GetClient() + } + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+		Complete()
+}
+
+// +kubebuilder:webhook:path=/mutate-etos-eiffel-community-github-io-v1alpha1-testrun,mutating=true,failurePolicy=fail,sideEffects=None,groups=etos.eiffel-community.github.io,resources=testruns,verbs=create;update,versions=v1alpha1,name=mtestrun.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Defaulter = &TestRun{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (r *TestRun) Default() {
+	testrunlog.Info("default", "name", r.Name, "namespace", r.Namespace)
+
+	if r.Spec.ID == "" {
+		r.Spec.ID = string(uuid.NewUUID())
+	}
+	clusters := &ClusterList{}
+	if r.Spec.Cluster == "" {
+		if err := cli.List(context.TODO(), clusters, client.InNamespace(r.Namespace)); err != nil {
+			testrunlog.Error(err, "Failed to get clusters in namespace", "name", r.Name, "namespace", r.Namespace)
+		}
+	}
+	var cluster *Cluster
+	if len(clusters.Items) == 1 {
+		cluster = &clusters.Items[0]
+		r.Spec.Cluster = cluster.Name
+	}
+	if r.Spec.SuiteRunner == nil && cluster != nil {
+		r.Spec.SuiteRunner = &SuiteRunner{&cluster.Spec.ETOS.SuiteRunner.Image}
+	}
+	if r.Spec.LogListener == nil && cluster != nil {
+		r.Spec.LogListener = &LogListener{&cluster.Spec.ETOS.SuiteRunner.LogListener}
+	}
+	if r.Spec.EnvironmentProvider == nil && cluster != nil {
+		r.Spec.EnvironmentProvider = &EnvironmentProvider{&cluster.Spec.ETOS.EnvironmentProvider.Image}
+	}
+	if r.Spec.TestRunner == nil && cluster != nil {
+		r.Spec.TestRunner = &TestRunner{Version: cluster.Spec.ETOS.TestRunner.Version}
+	}
+
+	addLabel := true
+	if r.ObjectMeta.Labels != nil {
+		for key := range r.ObjectMeta.GetLabels() {
+			if key == "etos.eiffel-community.github.io/id" {
+				addLabel = false
+			}
+		}
+	} else {
+		r.ObjectMeta.Labels = map[string]string{}
+	}
+	if addLabel {
+		r.ObjectMeta.Labels["etos.eiffel-community.github.io/id"] = r.Spec.ID
+	}
+}
+
+// +kubebuilder:webhook:path=/validate-etos-eiffel-community-github-io-v1alpha1-testrun,mutating=false,failurePolicy=fail,sideEffects=None,groups=etos.eiffel-community.github.io,resources=testruns,verbs=create;update,versions=v1alpha1,name=vtestrun.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Validator = &TestRun{}
+
+// validate checks that the required parameters are set. The validation is done here instead of directly in the
+// struct markers since the input is mutated in the Default function.
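+// For example, a TestRun created in a namespace that contains no Cluster (or more than one) will still have an
+// empty spec.cluster after defaulting, and it is rejected here with a field error rather than being accepted.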
+func (r *TestRun) validate() error { + var allErrs field.ErrorList + if r.Spec.Cluster == "" { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("cluster"), + r.Spec.Cluster, + "Cluster is missing, either no cluster exists in namespace or too many to choose from", + )) + } + + if r.Spec.SuiteRunner == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("suiteRunner"), + r.Spec.SuiteRunner, + "SuiteRunner image information is missing, maybe because cluster is missing?", + )) + } + + if r.Spec.LogListener == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("logListener"), + r.Spec.LogListener, + "LogListener image information is missing, maybe because cluster is missing?", + )) + } + + if r.Spec.EnvironmentProvider == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("environmentProvider"), + r.Spec.EnvironmentProvider, + "EnvironmentProvider image information is missing, maybe because cluster is missing?", + )) + } + + if r.Spec.TestRunner == nil { + allErrs = append(allErrs, field.Invalid( + field.NewPath("spec").Child("testRunner"), + r.Spec.TestRunner, + "TestRunner version information is missing, maybe because cluster is missing?", + )) + } + + groupVersionKind := r.GroupVersionKind() + if len(allErrs) > 0 { + return apierrors.NewInvalid( + schema.GroupKind{Group: groupVersionKind.Group, Kind: groupVersionKind.Kind}, + r.Name, allErrs, + ) + } + return nil +} + +// ValidateCreate validates the creation of a TestRun. +func (r *TestRun) ValidateCreate() (admission.Warnings, error) { + testrunlog.Info("validate create", "name", r.Name) + return nil, r.validate() +} + +// ValidateUpdate validates the updates of a TestRun. +func (r *TestRun) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + testrunlog.Info("validate update", "name", r.Name) + return nil, r.validate() +} + +// ValidateDelete validates the deletion of a TestRun. +func (r *TestRun) ValidateDelete() (admission.Warnings, error) { + testrunlog.Info("validate delete", "name", r.Name) + return nil, nil +} diff --git a/api/v1alpha1/testrun_webhook_test.go b/api/v1alpha1/testrun_webhook_test.go new file mode 100644 index 00000000..cc34bb5f --- /dev/null +++ b/api/v1alpha1/testrun_webhook_test.go @@ -0,0 +1,33 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + . 
"github.com/onsi/ginkgo/v2" +) + +var _ = Describe("TestRun Webhook", func() { + + Context("When creating TestRun under Defaulting Webhook", func() { + It("Should fill in the default value if a required field is empty", func() { + + // TODO(user): Add your logic here + + }) + }) + +}) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go new file mode 100644 index 00000000..0fb495cc --- /dev/null +++ b/api/v1alpha1/webhook_suite_test.go @@ -0,0 +1,151 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "runtime" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + // +kubebuilder:scaffold:imports + apimachineryruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.30.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + scheme := apimachineryruntime.NewScheme() + err = AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + err = admissionv1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + WebhookServer: webhook.NewServer(webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + Metrics: metricsserver.Options{BindAddress: "0"}, + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&Provider{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + err = (&TestRun{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + err = (&EnvironmentRequest{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + return conn.Close() + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..69fe4fff --- /dev/null +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1464 @@ +//go:build !ignore_autogenerated + +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ETOS.DeepCopyInto(&out.ETOS) + out.Database = in.Database + in.MessageBus.DeepCopyInto(&out.MessageBus) + in.EventRepository.DeepCopyInto(&out.EventRepository) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Database) DeepCopyInto(out *Database) { + *out = *in + out.Etcd = in.Etcd +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. +func (in *Database) DeepCopy() *Database { + if in == nil { + return nil + } + out := new(Database) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ETOS) DeepCopyInto(out *ETOS) { + *out = *in + out.API = in.API + out.SSE = in.SSE + out.LogArea = in.LogArea + out.SuiteRunner = in.SuiteRunner + out.TestRunner = in.TestRunner + out.EnvironmentProvider = in.EnvironmentProvider + in.Ingress.DeepCopyInto(&out.Ingress) + in.Config.DeepCopyInto(&out.Config) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOS. +func (in *ETOS) DeepCopy() *ETOS { + if in == nil { + return nil + } + out := new(ETOS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSAPI) DeepCopyInto(out *ETOSAPI) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSAPI. +func (in *ETOSAPI) DeepCopy() *ETOSAPI { + if in == nil { + return nil + } + out := new(ETOSAPI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSConfig) DeepCopyInto(out *ETOSConfig) { + *out = *in + in.TestRunRetention.DeepCopyInto(&out.TestRunRetention) + in.EncryptionKey.DeepCopyInto(&out.EncryptionKey) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSConfig. +func (in *ETOSConfig) DeepCopy() *ETOSConfig { + if in == nil { + return nil + } + out := new(ETOSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSEnvironmentProvider) DeepCopyInto(out *ETOSEnvironmentProvider) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSEnvironmentProvider. +func (in *ETOSEnvironmentProvider) DeepCopy() *ETOSEnvironmentProvider { + if in == nil { + return nil + } + out := new(ETOSEnvironmentProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSLogArea) DeepCopyInto(out *ETOSLogArea) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSLogArea. +func (in *ETOSLogArea) DeepCopy() *ETOSLogArea { + if in == nil { + return nil + } + out := new(ETOSLogArea) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSSSE) DeepCopyInto(out *ETOSSSE) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSSSE. +func (in *ETOSSSE) DeepCopy() *ETOSSSE { + if in == nil { + return nil + } + out := new(ETOSSSE) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSSuiteRunner) DeepCopyInto(out *ETOSSuiteRunner) { + *out = *in + out.Image = in.Image + out.LogListener = in.LogListener +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSSuiteRunner. 
+func (in *ETOSSuiteRunner) DeepCopy() *ETOSSuiteRunner { + if in == nil { + return nil + } + out := new(ETOSSuiteRunner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ETOSTestRunner) DeepCopyInto(out *ETOSTestRunner) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ETOSTestRunner. +func (in *ETOSTestRunner) DeepCopy() *ETOSTestRunner { + if in == nil { + return nil + } + out := new(ETOSTestRunner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Environment) DeepCopyInto(out *Environment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Environment. +func (in *Environment) DeepCopy() *Environment { + if in == nil { + return nil + } + out := new(Environment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Environment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentList) DeepCopyInto(out *EnvironmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Environment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentList. +func (in *EnvironmentList) DeepCopy() *EnvironmentList { + if in == nil { + return nil + } + out := new(EnvironmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvironmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentProvider) DeepCopyInto(out *EnvironmentProvider) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentProvider. +func (in *EnvironmentProvider) DeepCopy() *EnvironmentProvider { + if in == nil { + return nil + } + out := new(EnvironmentProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentProviders) DeepCopyInto(out *EnvironmentProviders) { + *out = *in + out.IUT = in.IUT + out.ExecutionSpace = in.ExecutionSpace + out.LogArea = in.LogArea +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentProviders. 
+func (in *EnvironmentProviders) DeepCopy() *EnvironmentProviders { + if in == nil { + return nil + } + out := new(EnvironmentProviders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentRequest) DeepCopyInto(out *EnvironmentRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentRequest. +func (in *EnvironmentRequest) DeepCopy() *EnvironmentRequest { + if in == nil { + return nil + } + out := new(EnvironmentRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvironmentRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentRequestList) DeepCopyInto(out *EnvironmentRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EnvironmentRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentRequestList. +func (in *EnvironmentRequestList) DeepCopy() *EnvironmentRequestList { + if in == nil { + return nil + } + out := new(EnvironmentRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvironmentRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentRequestSpec) DeepCopyInto(out *EnvironmentRequestSpec) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + **out = **in + } + if in.Dataset != nil { + in, out := &in.Dataset, &out.Dataset + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + out.Providers = in.Providers + in.Splitter.DeepCopyInto(&out.Splitter) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentRequestSpec. +func (in *EnvironmentRequestSpec) DeepCopy() *EnvironmentRequestSpec { + if in == nil { + return nil + } + out := new(EnvironmentRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentRequestStatus) DeepCopyInto(out *EnvironmentRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvironmentProviders != nil { + in, out := &in.EnvironmentProviders, &out.EnvironmentProviders + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentRequestStatus. +func (in *EnvironmentRequestStatus) DeepCopy() *EnvironmentRequestStatus { + if in == nil { + return nil + } + out := new(EnvironmentRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentSpec) DeepCopyInto(out *EnvironmentSpec) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]Test, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Iut != nil { + in, out := &in.Iut, &out.Iut + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + if in.Executor != nil { + in, out := &in.Executor, &out.Executor + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + if in.LogArea != nil { + in, out := &in.LogArea, &out.LogArea + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentSpec. +func (in *EnvironmentSpec) DeepCopy() *EnvironmentSpec { + if in == nil { + return nil + } + out := new(EnvironmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentStatus) DeepCopyInto(out *EnvironmentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvironmentReleasers != nil { + in, out := &in.EnvironmentReleasers, &out.EnvironmentReleasers + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentStatus. +func (in *EnvironmentStatus) DeepCopy() *EnvironmentStatus { + if in == nil { + return nil + } + out := new(EnvironmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventRepository) DeepCopyInto(out *EventRepository) { + *out = *in + out.API = in.API + out.Storage = in.Storage + in.Database.DeepCopyInto(&out.Database) + in.Ingress.DeepCopyInto(&out.Ingress) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventRepository. +func (in *EventRepository) DeepCopy() *EventRepository { + if in == nil { + return nil + } + out := new(EventRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Execution) DeepCopyInto(out *Execution) { + *out = *in + if in.Checkout != nil { + in, out := &in.Checkout, &out.Checkout + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Execute != nil { + in, out := &in.Execute, &out.Execute + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Execution. +func (in *Execution) DeepCopy() *Execution { + if in == nil { + return nil + } + out := new(Execution) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecutionSpaceProvider) DeepCopyInto(out *ExecutionSpaceProvider) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutionSpaceProvider. +func (in *ExecutionSpaceProvider) DeepCopy() *ExecutionSpaceProvider { + if in == nil { + return nil + } + out := new(ExecutionSpaceProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Healthcheck) DeepCopyInto(out *Healthcheck) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Healthcheck. +func (in *Healthcheck) DeepCopy() *Healthcheck { + if in == nil { + return nil + } + out := new(Healthcheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. 
+func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IutProvider) DeepCopyInto(out *IutProvider) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IutProvider. +func (in *IutProvider) DeepCopy() *IutProvider { + if in == nil { + return nil + } + out := new(IutProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTas) DeepCopyInto(out *JSONTas) { + *out = *in + if in.Iut != nil { + in, out := &in.Iut, &out.Iut + *out = new(JSONTasIut) + (*in).DeepCopyInto(*out) + } + if in.ExecutionSpace != nil { + in, out := &in.ExecutionSpace, &out.ExecutionSpace + *out = new(JSONTasExecutionSpace) + (*in).DeepCopyInto(*out) + } + if in.LogArea != nil { + in, out := &in.LogArea, &out.LogArea + *out = new(JSONTasLogArea) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTas. +func (in *JSONTas) DeepCopy() *JSONTas { + if in == nil { + return nil + } + out := new(JSONTas) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTasExecutionSpace) DeepCopyInto(out *JSONTasExecutionSpace) { + *out = *in + if in.Checkin != nil { + in, out := &in.Checkin, &out.Checkin + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + if in.Checkout != nil { + in, out := &in.Checkout, &out.Checkout + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + in.List.DeepCopyInto(&out.List) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTasExecutionSpace. +func (in *JSONTasExecutionSpace) DeepCopy() *JSONTasExecutionSpace { + if in == nil { + return nil + } + out := new(JSONTasExecutionSpace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTasIUTPrepare) DeepCopyInto(out *JSONTasIUTPrepare) { + *out = *in + in.Stages.DeepCopyInto(&out.Stages) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTasIUTPrepare. +func (in *JSONTasIUTPrepare) DeepCopy() *JSONTasIUTPrepare { + if in == nil { + return nil + } + out := new(JSONTasIUTPrepare) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTasIut) DeepCopyInto(out *JSONTasIut) { + *out = *in + if in.Checkin != nil { + in, out := &in.Checkin, &out.Checkin + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + if in.Checkout != nil { + in, out := &in.Checkout, &out.Checkout + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + in.List.DeepCopyInto(&out.List) + in.Prepare.DeepCopyInto(&out.Prepare) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTasIut. 
+func (in *JSONTasIut) DeepCopy() *JSONTasIut { + if in == nil { + return nil + } + out := new(JSONTasIut) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTasList) DeepCopyInto(out *JSONTasList) { + *out = *in + if in.Possible != nil { + in, out := &in.Possible, &out.Possible + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + if in.Available != nil { + in, out := &in.Available, &out.Available + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTasList. +func (in *JSONTasList) DeepCopy() *JSONTasList { + if in == nil { + return nil + } + out := new(JSONTasList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTasLogArea) DeepCopyInto(out *JSONTasLogArea) { + *out = *in + if in.Checkin != nil { + in, out := &in.Checkin, &out.Checkin + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + if in.Checkout != nil { + in, out := &in.Checkout, &out.Checkout + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } + in.List.DeepCopyInto(&out.List) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTasLogArea. +func (in *JSONTasLogArea) DeepCopy() *JSONTasLogArea { + if in == nil { + return nil + } + out := new(JSONTasLogArea) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONTasPrepareStages) DeepCopyInto(out *JSONTasPrepareStages) { + *out = *in + in.EnvironmentProvider.DeepCopyInto(&out.EnvironmentProvider) + in.SuiteRunner.DeepCopyInto(&out.SuiteRunner) + in.TestRunner.DeepCopyInto(&out.TestRunner) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONTasPrepareStages. +func (in *JSONTasPrepareStages) DeepCopy() *JSONTasPrepareStages { + if in == nil { + return nil + } + out := new(JSONTasPrepareStages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAreaProvider) DeepCopyInto(out *LogAreaProvider) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAreaProvider. +func (in *LogAreaProvider) DeepCopy() *LogAreaProvider { + if in == nil { + return nil + } + out := new(LogAreaProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogListener) DeepCopyInto(out *LogListener) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogListener. +func (in *LogListener) DeepCopy() *LogListener { + if in == nil { + return nil + } + out := new(LogListener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MessageBus) DeepCopyInto(out *MessageBus) { + *out = *in + in.EiffelMessageBus.DeepCopyInto(&out.EiffelMessageBus) + in.ETOSMessageBus.DeepCopyInto(&out.ETOSMessageBus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MessageBus. +func (in *MessageBus) DeepCopy() *MessageBus { + if in == nil { + return nil + } + out := new(MessageBus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDB) DeepCopyInto(out *MongoDB) { + *out = *in + in.URI.DeepCopyInto(&out.URI) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDB. +func (in *MongoDB) DeepCopy() *MongoDB { + if in == nil { + return nil + } + out := new(MongoDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Provider) DeepCopyInto(out *Provider) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider. +func (in *Provider) DeepCopy() *Provider { + if in == nil { + return nil + } + out := new(Provider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Provider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderList) DeepCopyInto(out *ProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Provider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderList. +func (in *ProviderList) DeepCopy() *ProviderList { + if in == nil { + return nil + } + out := new(ProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { + *out = *in + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(Healthcheck) + **out = **in + } + if in.JSONTas != nil { + in, out := &in.JSONTas, &out.JSONTas + *out = new(JSONTas) + (*in).DeepCopyInto(*out) + } + if in.JSONTasSource != nil { + in, out := &in.JSONTasSource, &out.JSONTasSource + *out = new(VarSource) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. +func (in *ProviderSpec) DeepCopy() *ProviderSpec { + if in == nil { + return nil + } + out := new(ProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ProviderStatus) DeepCopyInto(out *ProviderStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderStatus. +func (in *ProviderStatus) DeepCopy() *ProviderStatus { + if in == nil { + return nil + } + out := new(ProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Providers) DeepCopyInto(out *Providers) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Providers. +func (in *Providers) DeepCopy() *Providers { + if in == nil { + return nil + } + out := new(Providers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RabbitMQ) DeepCopyInto(out *RabbitMQ) { + *out = *in + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(Var) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RabbitMQ. +func (in *RabbitMQ) DeepCopy() *RabbitMQ { + if in == nil { + return nil + } + out := new(RabbitMQ) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Retention) DeepCopyInto(out *Retention) { + *out = *in + if in.Failure != nil { + in, out := &in.Failure, &out.Failure + *out = new(v1.Duration) + **out = **in + } + if in.Success != nil { + in, out := &in.Success, &out.Success + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Retention. +func (in *Retention) DeepCopy() *Retention { + if in == nil { + return nil + } + out := new(Retention) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Splitter) DeepCopyInto(out *Splitter) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]Test, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Splitter. +func (in *Splitter) DeepCopy() *Splitter { + if in == nil { + return nil + } + out := new(Splitter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Stage) DeepCopyInto(out *Stage) { + *out = *in + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stage. +func (in *Stage) DeepCopy() *Stage { + if in == nil { + return nil + } + out := new(Stage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Suite) DeepCopyInto(out *Suite) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]Test, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Dataset != nil { + in, out := &in.Dataset, &out.Dataset + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suite. +func (in *Suite) DeepCopy() *Suite { + if in == nil { + return nil + } + out := new(Suite) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuiteRunner) DeepCopyInto(out *SuiteRunner) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuiteRunner. +func (in *SuiteRunner) DeepCopy() *SuiteRunner { + if in == nil { + return nil + } + out := new(SuiteRunner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Test) DeepCopyInto(out *Test) { + *out = *in + out.TestCase = in.TestCase + in.Execution.DeepCopyInto(&out.Execution) + out.Environment = in.Environment +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Test. +func (in *Test) DeepCopy() *Test { + if in == nil { + return nil + } + out := new(Test) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestCase) DeepCopyInto(out *TestCase) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestCase. +func (in *TestCase) DeepCopy() *TestCase { + if in == nil { + return nil + } + out := new(TestCase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestEnvironment) DeepCopyInto(out *TestEnvironment) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestEnvironment. +func (in *TestEnvironment) DeepCopy() *TestEnvironment { + if in == nil { + return nil + } + out := new(TestEnvironment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestRun) DeepCopyInto(out *TestRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestRun. +func (in *TestRun) DeepCopy() *TestRun { + if in == nil { + return nil + } + out := new(TestRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TestRunList) DeepCopyInto(out *TestRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TestRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestRunList. +func (in *TestRunList) DeepCopy() *TestRunList { + if in == nil { + return nil + } + out := new(TestRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestRunSpec) DeepCopyInto(out *TestRunSpec) { + *out = *in + in.Retention.DeepCopyInto(&out.Retention) + if in.SuiteRunner != nil { + in, out := &in.SuiteRunner, &out.SuiteRunner + *out = new(SuiteRunner) + (*in).DeepCopyInto(*out) + } + if in.TestRunner != nil { + in, out := &in.TestRunner, &out.TestRunner + *out = new(TestRunner) + **out = **in + } + if in.LogListener != nil { + in, out := &in.LogListener, &out.LogListener + *out = new(LogListener) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentProvider != nil { + in, out := &in.EnvironmentProvider, &out.EnvironmentProvider + *out = new(EnvironmentProvider) + (*in).DeepCopyInto(*out) + } + out.Providers = in.Providers + if in.Suites != nil { + in, out := &in.Suites, &out.Suites + *out = make([]Suite, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestRunSpec. +func (in *TestRunSpec) DeepCopy() *TestRunSpec { + if in == nil { + return nil + } + out := new(TestRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestRunStatus) DeepCopyInto(out *TestRunStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SuiteRunners != nil { + in, out := &in.SuiteRunners, &out.SuiteRunners + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.EnvironmentRequests != nil { + in, out := &in.EnvironmentRequests, &out.EnvironmentRequests + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestRunStatus. +func (in *TestRunStatus) DeepCopy() *TestRunStatus { + if in == nil { + return nil + } + out := new(TestRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestRunner) DeepCopyInto(out *TestRunner) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestRunner. 
+func (in *TestRunner) DeepCopy() *TestRunner { + if in == nil { + return nil + } + out := new(TestRunner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Var) DeepCopyInto(out *Var) { + *out = *in + in.ValueFrom.DeepCopyInto(&out.ValueFrom) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Var. +func (in *Var) DeepCopy() *Var { + if in == nil { + return nil + } + out := new(Var) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VarSource) DeepCopyInto(out *VarSource) { + *out = *in + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(corev1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VarSource. +func (in *VarSource) DeepCopy() *VarSource { + if in == nil { + return nil + } + out := new(VarSource) + in.DeepCopyInto(out) + return out +} diff --git a/cli/src/etos_client/etos/etos.py b/cli/src/etos_client/etos/etos.py index 5bb04f6d..b5ee7664 100644 --- a/cli/src/etos_client/etos/etos.py +++ b/cli/src/etos_client/etos/etos.py @@ -45,9 +45,10 @@ class ETOS: # pylint:disable=too-few-public-methods reason = "" response: ResponseSchema = None - def __init__(self, cluster: str) -> None: + def __init__(self, cluster: str, v1alpha: bool = False) -> None: """Initialize ETOS.""" self.cluster = cluster + self.v1alpha = v1alpha # ping HTTP client with 5 sec timeout for each attempt: self.__http_ping = Http(retry=HTTP_RETRY_PARAMETERS, timeout=5) # greater HTTP timeout for other requests: @@ -83,7 +84,10 @@ def __start(self, request_data: RequestSchema) -> Union[ResponseSchema, None]: def __retry_trigger_etos(self, request_data: RequestSchema) -> Union[ResponseSchema, None]: """Trigger ETOS, retrying on non-client errors until successful.""" # retry rules are set in the Http client - response = self.__http.post(f"{self.cluster}/api/etos", json=request_data.dict()) + if self.v1alpha: + response = self.__http.post(f"{self.cluster}/api/v1alpha/testrun", json=request_data.model_dump()) + else: + response = self.__http.post(f"{self.cluster}/api/etos", json=request_data.model_dump()) if self.__response_ok(response): return ResponseSchema.from_response(response.json()) self.logger.critical("Failed to trigger ETOS.") diff --git a/cli/src/etos_client/start.py b/cli/src/etos_client/start.py index 48c5f189..a3a921f5 100644 --- a/cli/src/etos_client/start.py +++ b/cli/src/etos_client/start.py @@ -41,7 +41,7 @@ def start(args: dict) -> ETOS: """Start ETOS.""" - etos = ETOS(args[""]) + etos = ETOS(args[""], args["--v1alpha"]) response = etos.start(RequestSchema.from_args(args)) if not response: sys.exit(etos.reason) @@ -103,7 +103,7 @@ class Start(SubCommand): """ Client for executing test automation suites in ETOS. - Usage: etosctl testrun start [-v|-vv] [-h] -i IDENTITY -s TEST_SUITE [--no-tty] [-w WORKSPACE] [-a ARTIFACT_DIR] [-r REPORT_DIR] [-d DOWNLOAD_REPORTS] [--iut-provider IUT_PROVIDER] [--execution-space-provider EXECUTION_SPACE_PROVIDER] [--log-area-provider LOG_AREA_PROVIDER] [--dataset=DATASET]... 
[--version] + Usage: etosctl testrun start [-v|-vv] [-h] -i IDENTITY -s TEST_SUITE [--no-tty] [-w WORKSPACE] [-a ARTIFACT_DIR] [-r REPORT_DIR] [-d DOWNLOAD_REPORTS] [--iut-provider IUT_PROVIDER] [--execution-space-provider EXECUTION_SPACE_PROVIDER] [--log-area-provider LOG_AREA_PROVIDER] [--v1alpha] [--dataset=DATASET]... [--version] Options: -h, --help Show this help message and exit @@ -118,6 +118,7 @@ class Start(SubCommand): --execution-space-provider EXECUTION_SPACE_PROVIDER Which execution space provider to use. --log-area-provider LOG_AREA_PROVIDER Which log area provider to use. --dataset DATASET Additional dataset information to the environment provider. Check with your provider which information can be supplied. + --v1alpha Run in the v1alpha version of ETOS. --version Show program's version number and exit """ diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 00000000..76364ec9 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,212 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package main + +import ( + "crypto/tls" + "flag" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/utils/clock" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + "github.com/eiffel-community/etos/internal/controller" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(etosv1alpha1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8181", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. 
Use --metrics-secure=false to use HTTP instead.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: tlsOpts, + }) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + // not provided, self-signed certificates will be generated by default. This option is not recommended for + // production environments as self-signed certificates do not offer the same level of trust and security + // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing + // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName + // to provide certificates, ensuring the server communicates using trusted and secure certificates. + TLSOpts: tlsOpts, + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/filters#WithAuthenticationAndAuthorization + FilterProvider: filters.WithAuthenticationAndAuthorization, + }, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "8ff393c5.eiffel-community.github.io", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&controller.TestRunReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Clock: &clock.RealClock{}, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "TestRun") + os.Exit(1) + } + if err = (&controller.ProviderReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Provider") + os.Exit(1) + } + if err = (&controller.EnvironmentReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Environment") + os.Exit(1) + } + if err = (&controller.ClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Cluster") + os.Exit(1) + } + if err = (&controller.EnvironmentRequestReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "EnvironmentRequest") + os.Exit(1) + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&etosv1alpha1.Provider{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "Provider") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&etosv1alpha1.TestRun{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "TestRun") + os.Exit(1) + } + } + if os.Getenv("ENABLE_WEBHOOKS") != "false" { + if err = (&etosv1alpha1.EnvironmentRequest{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "EnvironmentRequest") + os.Exit(1) + } + } + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 00000000..7a9a6d82 --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,35 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
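The Issuer and Certificate below provision the webhook-server-cert secret that the manager's webhook server (set up in cmd/main.go above) uses for TLS. As a hedged sketch, not part of this change set, controller-runtime's webhook server can be pointed at that mounted secret explicitly; /tmp/k8s-webhook-server/serving-certs is controller-runtime's default CertDir, and tls.crt/tls.key are the file names cert-manager writes into the secret. Adjust the path if the deployment mounts the secret elsewhere.

package main

// Sketch only: wiring the webhook server to the cert-manager-issued serving
// certificate. Port and CertDir are controller-runtime defaults; TLSOpts can
// reuse the HTTP/2-disabling option configured in cmd/main.go.
import (
	"crypto/tls"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newWebhookServer(tlsOpts []func(*tls.Config)) webhook.Server {
	return webhook.NewServer(webhook.Options{
		Port:     9443,                                    // controller-runtime default webhook port
		CertDir:  "/tmp/k8s-webhook-server/serving-certs", // where webhook-server-cert is mounted
		CertName: "tls.crt",                               // written by cert-manager into the secret
		KeyName:  "tls.key",
		TLSOpts:  tlsOpts,
	})
}

func main() {
	_ = newWebhookServer(nil) // nil TLS options for the sketch
}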
+apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/name: certificate + app.kubernetes.io/instance: serving-cert + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: etos + app.kubernetes.io/part-of: etos + app.kubernetes.io/managed-by: kustomize + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + dnsNames: + - SERVICE_NAME.SERVICE_NAMESPACE.svc + - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 00000000..bebea5a5 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 00000000..cf6f89e8 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,8 @@ +# This configuration is for teaching kustomize how to update name ref substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name diff --git a/config/crd/bases/etos.eiffel-community.github.io_clusters.yaml b/config/crd/bases/etos.eiffel-community.github.io_clusters.yaml new file mode 100644 index 00000000..c748c175 --- /dev/null +++ b/config/crd/bases/etos.eiffel-community.github.io_clusters.yaml @@ -0,0 +1,703 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clusters.etos.eiffel-community.github.io +spec: + group: etos.eiffel-community.github.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the clusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + database: + default: {} + properties: + deploy: + default: true + type: boolean + etcd: + default: {} + properties: + host: + default: etcd-client + description: Parameter is ignored if Deploy is set to true. + type: string + port: + default: "2379" + description: Parameter is ignored if Deploy is set to true. + type: string + type: object + type: object + etos: + default: {} + properties: + api: + default: + image: registry.nordix.org/eiffel/etos-api:latest + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + required: + - image + type: object + config: + default: + encryptionKey: + value: "" + properties: + dev: + default: "true" + type: string + encryptionKey: + default: + value: "" + description: Var describes either a string value or a value + from a VarSource. + properties: + value: + type: string + valueFrom: + description: VarSource describes a value from either a + secretmap or configmap. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeySelector selects a key of a + Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + environmentTimeout: + default: "3600" + type: string + etosApiURL: + type: string + etosEventRepositoryURL: + type: string + eventDataTimeout: + default: "60" + type: string + routingKeyTag: + default: etos + type: string + source: + default: ETOS + type: string + testSuiteTimeout: + default: "10" + type: string + testrunRetention: + properties: + failure: + type: string + success: + type: string + type: object + timezone: + type: string + required: + - encryptionKey + type: object + environmentProvider: + default: + image: registry.nordix.org/eiffel/etos-environment-provider:latest + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + required: + - image + type: object + ingress: + description: Ingress configuration. + properties: + annotations: + additionalProperties: + type: string + type: object + enabled: + default: false + type: boolean + host: + default: "" + type: string + ingressClass: + type: string + type: object + logArea: + default: + image: registry.nordix.org/eiffel/etos-log-area:latest + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + required: + - image + type: object + sse: + default: + image: registry.nordix.org/eiffel/etos-sse:latest + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + required: + - image + type: object + suiteRunner: + default: + image: registry.nordix.org/eiffel/etos-suite-runner:latest + logListener: + image: registry.nordix.org/eiffel/etos-log-listener:latest + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + logListener: + description: Image configuration. + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + required: + - image + type: object + required: + - image + - logListener + type: object + testRunner: + default: + version: latest + properties: + version: + type: string + required: + - version + type: object + type: object + eventRepository: + default: {} + properties: + api: + default: + image: registry.nordix.org/eiffel/eiffel-graphql-api:latest + description: |- + We do not build the GraphQL API automatically nor publish it remotely. + This will need to be provided to work. + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + required: + - image + type: object + deploy: + default: false + description: Deploy a local event repository for a cluster. + type: boolean + host: + default: eventrepository + type: string + ingress: + default: {} + description: Ingress configuration. 
+ properties: + annotations: + additionalProperties: + type: string + type: object + enabled: + default: false + type: boolean + host: + default: "" + type: string + ingressClass: + type: string + type: object + mongo: + default: {} + properties: + deploy: + default: false + type: boolean + uri: + default: + value: mongodb://root:password@mongodb:27017/admin + description: Ignored if deploy is true + properties: + value: + type: string + valueFrom: + description: VarSource describes a value from either a + secretmap or configmap. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeySelector selects a key of a + Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: object + storage: + default: + image: registry.nordix.org/eiffel/eiffel-graphql-storage:latest + description: |- + We do not build the GraphQL API automatically nor publish it remotely. + This will need to be provided to work. + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + required: + - image + type: object + type: object + messageBus: + default: {} + properties: + eiffel: + default: + queueName: etos + description: RabbitMQ configuration. + properties: + deploy: + default: false + type: boolean + exchange: + default: amq.topic + type: string + host: + default: rabbitmq + type: string + password: + default: + value: guest + description: Var describes either a string value or a value + from a VarSource. + properties: + value: + type: string + valueFrom: + description: VarSource describes a value from either a + secretmap or configmap. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeySelector selects a key of a + Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + port: + default: "5672" + type: string + queueName: + type: string + queueParams: + type: string + ssl: + default: "false" + type: string + username: + default: guest + type: string + vhost: + default: / + type: string + type: object + logs: + default: + queueName: etos-*-temp + description: RabbitMQ configuration. + properties: + deploy: + default: false + type: boolean + exchange: + default: amq.topic + type: string + host: + default: rabbitmq + type: string + password: + default: + value: guest + description: Var describes either a string value or a value + from a VarSource. + properties: + value: + type: string + valueFrom: + description: VarSource describes a value from either a + secretmap or configmap. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeySelector selects a key of a + Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + port: + default: "5672" + type: string + queueName: + type: string + queueParams: + type: string + ssl: + default: "false" + type: string + username: + default: guest + type: string + vhost: + default: / + type: string + type: object + type: object + required: + - database + - etos + - eventRepository + - messageBus + type: object + status: + description: ClusterStatus defines the observed state of Cluster + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/etos.eiffel-community.github.io_environmentrequests.yaml b/config/crd/bases/etos.eiffel-community.github.io_environmentrequests.yaml new file mode 100644 index 00000000..bae8329c --- /dev/null +++ b/config/crd/bases/etos.eiffel-community.github.io_environmentrequests.yaml @@ -0,0 +1,324 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: environmentrequests.etos.eiffel-community.github.io +spec: + group: etos.eiffel-community.github.io + names: + kind: EnvironmentRequest + listKind: EnvironmentRequestList + plural: environmentrequests + singular: environmentrequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: EnvironmentRequest is the Schema for the environmentrequests + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EnvironmentRequestSpec defines the desired state of EnvironmentRequest + properties: + artifact: + type: string + dataset: + description: 'TODO: Dataset per provider?' + x-kubernetes-preserve-unknown-fields: true + id: + description: ID is the ID for the environments generated. Will be + generated if nil + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + identifier: + type: string + identity: + type: string + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull a container + image + type: string + maximumAmount: + type: integer + minimumAmount: + type: integer + name: + type: string + providers: + properties: + executionSpace: + properties: + id: + type: string + testRunner: + type: string + required: + - id + - testRunner + type: object + iut: + properties: + id: + type: string + required: + - id + type: object + logArea: + properties: + id: + type: string + required: + - id + type: object + type: object + splitter: + properties: + tests: + items: + properties: + environment: + description: TestEnvironment to run tests within. 
+ type: object + execution: + description: Execution describes hot to execute a testCase. + properties: + checkout: + items: + type: string + type: array + command: + type: string + environment: + additionalProperties: + type: string + type: object + execute: + items: + type: string + type: array + parameters: + additionalProperties: + type: string + type: object + testRunner: + type: string + required: + - checkout + - command + - environment + - parameters + - testRunner + type: object + id: + type: string + testCase: + description: TestCase metadata. + properties: + id: + type: string + tracker: + type: string + uri: + type: string + version: + type: string + required: + - id + type: object + required: + - environment + - execution + - id + - testCase + type: object + type: array + required: + - tests + type: object + required: + - image + - maximumAmount + - minimumAmount + - providers + - splitter + type: object + status: + description: EnvironmentRequestStatus defines the observed state of EnvironmentRequest + properties: + completionTime: + format: date-time + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + environmentProviders: + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + startTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/etos.eiffel-community.github.io_environments.yaml b/config/crd/bases/etos.eiffel-community.github.io_environments.yaml new file mode 100644 index 00000000..1b9aed99 --- /dev/null +++ b/config/crd/bases/etos.eiffel-community.github.io_environments.yaml @@ -0,0 +1,292 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: environments.etos.eiffel-community.github.io +spec: + group: etos.eiffel-community.github.io + names: + kind: Environment + listKind: EnvironmentList + plural: environments + singular: environment + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Active")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Environment is the Schema for the environments API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EnvironmentSpec defines the desired state of Environment + properties: + artifact: + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + context: + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + executor: + x-kubernetes-preserve-unknown-fields: true + iut: + x-kubernetes-preserve-unknown-fields: true + log_area: + x-kubernetes-preserve-unknown-fields: true + name: + type: string + priority: + type: integer + recipes: + items: + properties: + environment: + description: TestEnvironment to run tests within. + type: object + execution: + description: Execution describes hot to execute a testCase. 
+ properties: + checkout: + items: + type: string + type: array + command: + type: string + environment: + additionalProperties: + type: string + type: object + execute: + items: + type: string + type: array + parameters: + additionalProperties: + type: string + type: object + testRunner: + type: string + required: + - checkout + - command + - environment + - parameters + - testRunner + type: object + id: + type: string + testCase: + description: TestCase metadata. + properties: + id: + type: string + tracker: + type: string + uri: + type: string + version: + type: string + required: + - id + type: object + required: + - environment + - execution + - id + - testCase + type: object + type: array + sub_suite_id: + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + suite_id: + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + test_runner: + type: string + test_suite_started_id: + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + required: + - artifact + - context + - executor + - iut + - log_area + - name + - priority + - recipes + - sub_suite_id + - suite_id + - test_runner + - test_suite_started_id + type: object + status: + description: EnvironmentStatus defines the observed state of Environment + properties: + completionTime: + format: date-time + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + environmentReleasers: + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/etos.eiffel-community.github.io_providers.yaml b/config/crd/bases/etos.eiffel-community.github.io_providers.yaml new file mode 100644 index 00000000..feebb6b6 --- /dev/null +++ b/config/crd/bases/etos.eiffel-community.github.io_providers.yaml @@ -0,0 +1,336 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: providers.etos.eiffel-community.github.io +spec: + group: etos.eiffel-community.github.io + names: + kind: Provider + listKind: ProviderList + plural: providers + singular: provider + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].reason + name: Reason + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].message + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Provider is the Schema for the providers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ProviderSpec defines the desired state of Provider + properties: + healthCheck: + default: {} + description: |- + Healthcheck defines the health check endpoint and interval for providers. + The defaults of this should work most of the time. + properties: + endpoint: + default: /v1alpha1/selftest/ping + type: string + intervalSeconds: + default: 30 + type: integer + type: object + host: + type: string + jsontas: + description: |- + These are pointers so that they become nil in the Provider object in Kubernetes + and don't muddle up the yaml with empty data. 
+ properties: + execution_space: + description: JSONTasExecutionSpace is the execution space provider + definition for the JSONTas provider + properties: + checkin: + x-kubernetes-preserve-unknown-fields: true + checkout: + x-kubernetes-preserve-unknown-fields: true + id: + type: string + list: + description: JSONTasList is the List command in the JSONTas + provider. + properties: + available: + x-kubernetes-preserve-unknown-fields: true + possible: + x-kubernetes-preserve-unknown-fields: true + required: + - available + - possible + type: object + required: + - id + - list + type: object + image: + type: string + iut: + description: |- + These are pointers so that they become nil in the Provider object in Kubernetes + and don't muddle up the yaml with empty data. + properties: + checkin: + x-kubernetes-preserve-unknown-fields: true + checkout: + x-kubernetes-preserve-unknown-fields: true + id: + type: string + list: + description: JSONTasList is the List command in the JSONTas + provider. + properties: + available: + x-kubernetes-preserve-unknown-fields: true + possible: + x-kubernetes-preserve-unknown-fields: true + required: + - available + - possible + type: object + prepare: + description: JSONTasIUTPrepare defines the preparation required + for an IUT. + properties: + stages: + description: JSONTasIUTPrepareaStages defines the preparation + stages required for an IUT. + properties: + environment_provider: + description: Underscore used in these due to backwards + compatibility + properties: + steps: + default: {} + x-kubernetes-preserve-unknown-fields: true + type: object + suite_runner: + description: Stage is the definition of a stage where + to execute steps. + properties: + steps: + default: {} + x-kubernetes-preserve-unknown-fields: true + type: object + test_runner: + description: Stage is the definition of a stage where + to execute steps. + properties: + steps: + default: {} + x-kubernetes-preserve-unknown-fields: true + type: object + required: + - environment_provider + - suite_runner + - test_runner + type: object + required: + - stages + type: object + required: + - id + - list + type: object + log: + description: JSONTasLogArea is the log area provider definition + for the JSONTas provider + properties: + checkin: + x-kubernetes-preserve-unknown-fields: true + checkout: + x-kubernetes-preserve-unknown-fields: true + id: + type: string + list: + description: JSONTasList is the List command in the JSONTas + provider. + properties: + available: + x-kubernetes-preserve-unknown-fields: true + possible: + x-kubernetes-preserve-unknown-fields: true + required: + - available + - possible + type: object + required: + - id + - list + type: object + type: object + jsontasSource: + description: VarSource describes a value from either a secretmap or + configmap. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: + enum: + - execution-space + - iut + - log-area + type: string + required: + - type + type: object + status: + description: ProviderStatus defines the observed state of Provider + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/etos.eiffel-community.github.io_testruns.yaml b/config/crd/bases/etos.eiffel-community.github.io_testruns.yaml new file mode 100644 index 00000000..3e06699a --- /dev/null +++ b/config/crd/bases/etos.eiffel-community.github.io_testruns.yaml @@ -0,0 +1,442 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: testruns.etos.eiffel-community.github.io +spec: + group: etos.eiffel-community.github.io + names: + kind: TestRun + listKind: TestRunList + plural: testruns + singular: testrun + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Environment")].reason + name: Environment + type: string + - jsonPath: .status.conditions[?(@.type=="SuiteRunner")].reason + name: Suiterunner + type: string + - jsonPath: .status.conditions[?(@.type=="Active")].status + name: Active + type: string + - jsonPath: .status.verdict + name: Verdict + type: string + - jsonPath: .status.conditions[?(@.type=="Active")].message + name: Message + type: string + - jsonPath: .metadata.labels.etos\.eiffel-community\.github\.io/id + name: ID + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: TestRun is the Schema for the testruns API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TestRunSpec defines the desired state of TestRun + properties: + artifact: + description: Artifact is the ID of the software under test. + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + cluster: + description: Name of the ETOS cluster to execute the testrun in. 
+ type: string + environmentProvider: + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + required: + - image + type: object + id: + description: ID is the test suite ID for this execution. Will be generated + if nil + pattern: '[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}' + type: string + identity: + type: string + logListener: + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + required: + - image + type: object + providers: + description: |- + Providers to use for test execution. These names must correspond to existing + Provider kinds in the namespace where a testrun is created. + properties: + executionSpace: + type: string + iut: + type: string + logArea: + type: string + required: + - executionSpace + - iut + - logArea + type: object + retention: + properties: + failure: + type: string + success: + type: string + type: object + suiteRunner: + properties: + image: + type: string + imagePullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + required: + - image + type: object + suites: + items: + description: Suite to execute. + properties: + dataset: + description: Dataset for this suite. + x-kubernetes-preserve-unknown-fields: true + name: + description: Name of the test suite. + type: string + priority: + default: 1 + description: Priority to execute the test suite. + type: integer + tests: + description: Tests to execute as part of this testrun. + items: + properties: + environment: + description: TestEnvironment to run tests within. + type: object + execution: + description: Execution describes hot to execute a testCase. + properties: + checkout: + items: + type: string + type: array + command: + type: string + environment: + additionalProperties: + type: string + type: object + execute: + items: + type: string + type: array + parameters: + additionalProperties: + type: string + type: object + testRunner: + type: string + required: + - checkout + - command + - environment + - parameters + - testRunner + type: object + id: + type: string + testCase: + description: TestCase metadata. + properties: + id: + type: string + tracker: + type: string + uri: + type: string + version: + type: string + required: + - id + type: object + required: + - environment + - execution + - id + - testCase + type: object + type: array + required: + - dataset + - name + - priority + - tests + type: object + type: array + testRunner: + properties: + version: + type: string + required: + - version + type: object + required: + - artifact + - identity + - providers + - suites + type: object + status: + description: TestRunStatus defines the observed state of TestRun + properties: + completionTime: + format: date-time + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + environmentRequests: + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. 
Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + startTime: + format: date-time + type: string + suiteRunners: + items: + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. 
This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + verdict: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 00000000..42f64c86 --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,35 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/etos.eiffel-community.github.io_testruns.yaml +- bases/etos.eiffel-community.github.io_providers.yaml +- bases/etos.eiffel-community.github.io_environments.yaml +- bases/etos.eiffel-community.github.io_clusters.yaml +- bases/etos.eiffel-community.github.io_environmentrequests.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
+# patches here are for enabling the conversion webhook for each CRD +- path: patches/webhook_in_providers.yaml +- path: patches/webhook_in_testruns.yaml +- path: patches/webhook_in_environmentrequests.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +- path: patches/cainjection_in_testruns.yaml +- path: patches/cainjection_in_providers.yaml + # - path: patches/cainjection_in_environments.yaml + # - path: patches/cainjection_in_clusters.yaml + # - path: patches/cainjection_in_environmentrequests.yaml +#- path: patches/cainjection_in_testruns.yaml +#- path: patches/cainjection_in_environmentrequests.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. + +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_environmentrequests.yaml b/config/crd/patches/cainjection_in_environmentrequests.yaml new file mode 100644 index 00000000..6ead7bf1 --- /dev/null +++ b/config/crd/patches/cainjection_in_environmentrequests.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: environmentrequests.etos.eiffel-community.github.io diff --git a/config/crd/patches/cainjection_in_providers.yaml b/config/crd/patches/cainjection_in_providers.yaml new file mode 100644 index 00000000..102206ce --- /dev/null +++ b/config/crd/patches/cainjection_in_providers.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: providers.etos.eiffel-community.github.io diff --git a/config/crd/patches/cainjection_in_testruns.yaml b/config/crd/patches/cainjection_in_testruns.yaml new file mode 100644 index 00000000..e02bf298 --- /dev/null +++ b/config/crd/patches/cainjection_in_testruns.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: testruns.etos.eiffel-community.github.io diff --git a/config/crd/patches/webhook_in_environmentrequests.yaml b/config/crd/patches/webhook_in_environmentrequests.yaml new file mode 100644 index 
00000000..894fb134 --- /dev/null +++ b/config/crd/patches/webhook_in_environmentrequests.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: environmentrequests.etos.eiffel-community.github.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_providers.yaml b/config/crd/patches/webhook_in_providers.yaml new file mode 100644 index 00000000..38c3a4e4 --- /dev/null +++ b/config/crd/patches/webhook_in_providers.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: providers.etos.eiffel-community.github.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_testruns.yaml b/config/crd/patches/webhook_in_testruns.yaml new file mode 100644 index 00000000..fc9e0552 --- /dev/null +++ b/config/crd/patches/webhook_in_testruns.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: testruns.etos.eiffel-community.github.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 00000000..1ea65643 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,146 @@ +# Adds namespace to all resources. +namespace: etos-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: etos- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +# - ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml + +# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +- path: manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
+# 'CERTMANAGER' needs to be enabled to use ca injection +- path: webhookcainjection_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +replacements: + - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldPath: .metadata.namespace # namespace of the certificate CR + targets: + - select: + kind: ValidatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - select: + kind: MutatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - select: + kind: CustomResourceDefinition + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 0 + create: true + - source: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldPath: .metadata.name + targets: + - select: + kind: ValidatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true + - select: + kind: MutatingWebhookConfiguration + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true + - select: + kind: CustomResourceDefinition + fieldPaths: + - .metadata.annotations.[cert-manager.io/inject-ca-from] + options: + delimiter: '/' + index: 1 + create: true + - source: # Add cert-manager annotation to the webhook Service + kind: Service + version: v1 + name: webhook-service + fieldPath: .metadata.name # namespace of the service + targets: + - select: + kind: Certificate + group: cert-manager.io + version: v1 + fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + delimiter: '.' + index: 0 + create: true + - source: + kind: Service + version: v1 + name: webhook-service + fieldPath: .metadata.namespace # namespace of the service + targets: + - select: + kind: Certificate + group: cert-manager.io + version: v1 + fieldPaths: + - .spec.dnsNames.0 + - .spec.dnsNames.1 + options: + delimiter: '.' 
+ index: 1 + create: true diff --git a/config/default/manager_metrics_patch.yaml b/config/default/manager_metrics_patch.yaml new file mode 100644 index 00000000..2aaef653 --- /dev/null +++ b/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000..738de350 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/metrics_service.yaml b/config/default/metrics_service.yaml new file mode 100644 index 00000000..8f8a5f1b --- /dev/null +++ b/config/default/metrics_service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 00000000..283c61b0 --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,25 @@ +# This patch add annotation to admission webhook config and +# CERTIFICATE_NAMESPACE and CERTIFICATE_NAME will be substituted by kustomize +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: validatingwebhookconfiguration + app.kubernetes.io/instance: validating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: etos + app.kubernetes.io/part-of: etos + app.kubernetes.io/managed-by: kustomize + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 00000000..0af84fcb --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 00000000..1974eec9 --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + 
namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). + # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 00000000..ed137168 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 00000000..14dbbfb7 --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,30 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification. 
This poses a significant security risk by making the system vulnerable to + # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between + # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, + # compromising the integrity and confidentiality of the information. + # Please use the following options for secure configurations: + # caFile: /etc/metrics-certs/ca.crt + # certFile: /etc/metrics-certs/tls.crt + # keyFile: /etc/metrics-certs/tls.key + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/cluster_editor_role.yaml b/config/rbac/cluster_editor_role.yaml new file mode 100644 index 00000000..990b3adb --- /dev/null +++ b/config/rbac/cluster_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit clusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: cluster-editor-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters/status + verbs: + - get diff --git a/config/rbac/cluster_viewer_role.yaml b/config/rbac/cluster_viewer_role.yaml new file mode 100644 index 00000000..d45a8454 --- /dev/null +++ b/config/rbac/cluster_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view clusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: cluster-viewer-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters + verbs: + - get + - list + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters/status + verbs: + - get diff --git a/config/rbac/environment_editor_role.yaml b/config/rbac/environment_editor_role.yaml new file mode 100644 index 00000000..17dc5f37 --- /dev/null +++ b/config/rbac/environment_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit environments. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: environment-editor-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments/status + verbs: + - get diff --git a/config/rbac/environment_viewer_role.yaml b/config/rbac/environment_viewer_role.yaml new file mode 100644 index 00000000..89eba726 --- /dev/null +++ b/config/rbac/environment_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view environments. 
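+# For illustration only (not part of the scaffold): this viewer role could be granted
+# cluster-wide with a binding like the commented-out example below. The group name
+# "etos-readers" is a placeholder.
+# apiVersion: rbac.authorization.k8s.io/v1
+# kind: ClusterRoleBinding
+# metadata:
+#   name: environment-viewer-rolebinding
+# roleRef:
+#   apiGroup: rbac.authorization.k8s.io
+#   kind: ClusterRole
+#   name: environment-viewer-role
+# subjects:
+# - apiGroup: rbac.authorization.k8s.io
+#   kind: Group
+#   name: etos-readers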
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: environment-viewer-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments + verbs: + - get + - list + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments/status + verbs: + - get diff --git a/config/rbac/environmentrequest_editor_role.yaml b/config/rbac/environmentrequest_editor_role.yaml new file mode 100644 index 00000000..df9890b8 --- /dev/null +++ b/config/rbac/environmentrequest_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit environmentrequests. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: environmentrequest-editor-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests/status + verbs: + - get diff --git a/config/rbac/environmentrequest_viewer_role.yaml b/config/rbac/environmentrequest_viewer_role.yaml new file mode 100644 index 00000000..ef7b466b --- /dev/null +++ b/config/rbac/environmentrequest_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view environmentrequests. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: environmentrequest-viewer-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests + verbs: + - get + - list + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests/status + verbs: + - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 00000000..159926f6 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,35 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. +# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the Project itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. 
+- environmentrequest_editor_role.yaml +- environmentrequest_viewer_role.yaml +- cluster_editor_role.yaml +- cluster_viewer_role.yaml +- environment_editor_role.yaml +- environment_viewer_role.yaml +- provider_editor_role.yaml +- provider_viewer_role.yaml +- testrun_editor_role.yaml +- testrun_viewer_role.yaml + diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..2f9ba18b --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..9e022ea3 --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/metrics_auth_role.yaml b/config/rbac/metrics_auth_role.yaml new file mode 100644 index 00000000..32d2e4ec --- /dev/null +++ b/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/metrics_auth_role_binding.yaml b/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 00000000..e775d67f --- /dev/null +++ b/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/metrics_reader_role.yaml b/config/rbac/metrics_reader_role.yaml new file mode 100644 index 00000000..51a75db4 --- /dev/null +++ b/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/config/rbac/provider_editor_role.yaml b/config/rbac/provider_editor_role.yaml new file mode 100644 index 00000000..de222dad --- /dev/null +++ b/config/rbac/provider_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit providers. 
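+# For illustration only (not part of the scaffold): the editor role can also be granted
+# within a single namespace by referencing the ClusterRole from a RoleBinding, as in the
+# commented-out sketch below. The namespace and user name are placeholders.
+# apiVersion: rbac.authorization.k8s.io/v1
+# kind: RoleBinding
+# metadata:
+#   name: provider-editor-rolebinding
+#   namespace: etos-testruns
+# roleRef:
+#   apiGroup: rbac.authorization.k8s.io
+#   kind: ClusterRole
+#   name: provider-editor-role
+# subjects:
+# - apiGroup: rbac.authorization.k8s.io
+#   kind: User
+#   name: jane.doe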
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: provider-editor-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers/status + verbs: + - get diff --git a/config/rbac/provider_viewer_role.yaml b/config/rbac/provider_viewer_role.yaml new file mode 100644 index 00000000..d324f23b --- /dev/null +++ b/config/rbac/provider_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view providers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: provider-viewer-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers + verbs: + - get + - list + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 00000000..0e51bf0c --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,284 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - '*' + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - jobs + verbs: + - delete + - get + - list + - watch +- apiGroups: + - '*' + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - '*' + resources: + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs/status + verbs: + - get +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters/finalizers + verbs: + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - clusters/status + verbs: + - get + - patch + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests/finalizers + verbs: + - 
update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentrequests/status + verbs: + - get + - patch + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments/finalizers + verbs: + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environments/status + verbs: + - get + - patch + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - environmentsrequests/status + verbs: + - get +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers/finalizers + verbs: + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - providers/status + verbs: + - get + - patch + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns/finalizers + verbs: + - update +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns/status + verbs: + - get + - patch + - update diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 00000000..50aa3a51 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 00000000..3f39ab7c --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/config/rbac/testrun_editor_role.yaml b/config/rbac/testrun_editor_role.yaml new file mode 100644 index 00000000..a6ba20c9 --- /dev/null +++ b/config/rbac/testrun_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit testruns. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: testrun-editor-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns/status + verbs: + - get diff --git a/config/rbac/testrun_viewer_role.yaml b/config/rbac/testrun_viewer_role.yaml new file mode 100644 index 00000000..258cbc60 --- /dev/null +++ b/config/rbac/testrun_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view testruns. 
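+# For illustration only (not part of the scaffold): once this role has been bound to a
+# user or group, read access can be verified with, e.g.:
+#   kubectl auth can-i list testruns.etos.eiffel-community.github.io --as=<user>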
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: testrun-viewer-role +rules: +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns + verbs: + - get + - list + - watch +- apiGroups: + - etos.eiffel-community.github.io + resources: + - testruns/status + verbs: + - get diff --git a/config/samples/etos_v1alpha1_cluster.yaml b/config/samples/etos_v1alpha1_cluster.yaml new file mode 100644 index 00000000..7740833e --- /dev/null +++ b/config/samples/etos_v1alpha1_cluster.yaml @@ -0,0 +1,34 @@ +apiVersion: etos.eiffel-community.github.io/v1alpha1 +kind: Cluster +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: cluster-sample +spec: + etos: + api: + image: "registry.nordix.org/eiffel/etos-api:672f982e" + sse: + image: "registry.nordix.org/eiffel/etos-sse:672f982e" + logArea: + image: "registry.nordix.org/eiffel/etos-logarea:672f982e" + ingress: + enabled: true + database: + deploy: true + messageBus: + eiffel: + deploy: true + queueName: "etos" + logs: + deploy: true + queueName: "etos.*.log" + eventRepository: + deploy: true + apiImage: registry.nordix.org/eiffel/eiffel-graphql-api:latest + storageImage: registry.nordix.org/eiffel/eiffel-graphql-storage:latest + mongo: + deploy: true + ingress: + enabled: true diff --git a/config/samples/etos_v1alpha1_environment.yaml b/config/samples/etos_v1alpha1_environment.yaml new file mode 100644 index 00000000..3cf287dd --- /dev/null +++ b/config/samples/etos_v1alpha1_environment.yaml @@ -0,0 +1,9 @@ +apiVersion: etos.eiffel-community.github.io/v1alpha1 +kind: Environment +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: environment-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/etos_v1alpha1_environmentrequest.yaml b/config/samples/etos_v1alpha1_environmentrequest.yaml new file mode 100644 index 00000000..20be138b --- /dev/null +++ b/config/samples/etos_v1alpha1_environmentrequest.yaml @@ -0,0 +1,9 @@ +apiVersion: etos.eiffel-community.github.io/v1alpha1 +kind: EnvironmentRequest +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: environmentrequest-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/etos_v1alpha1_provider.yaml b/config/samples/etos_v1alpha1_provider.yaml new file mode 100644 index 00000000..c13d9a91 --- /dev/null +++ b/config/samples/etos_v1alpha1_provider.yaml @@ -0,0 +1,9 @@ +apiVersion: etos.eiffel-community.github.io/v1alpha1 +kind: Provider +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: provider-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/etos_v1alpha1_testrun.yaml b/config/samples/etos_v1alpha1_testrun.yaml new file mode 100644 index 00000000..00b63892 --- /dev/null +++ b/config/samples/etos_v1alpha1_testrun.yaml @@ -0,0 +1,9 @@ +apiVersion: etos.eiffel-community.github.io/v1alpha1 +kind: TestRun +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: testrun-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 00000000..07164ed7 --- /dev/null +++ b/config/samples/kustomization.yaml @@ -0,0 +1,8 @@ +## Append samples of your project ## +resources: +- 
etos_v1alpha1_testrun.yaml +- etos_v1alpha1_provider.yaml +- etos_v1alpha1_environment.yaml +- etos_v1alpha1_cluster.yaml +- etos_v1alpha1_environmentrequest.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 00000000..9cf26134 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000..206316e5 --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,22 @@ +# the following config is for teaching kustomize where to look at when substituting nameReference. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 00000000..c1bf29e4 --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,112 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-etos-eiffel-community-github-io-v1alpha1-environmentrequest + failurePolicy: Fail + name: menvironmentrequest.kb.io + rules: + - apiGroups: + - etos.eiffel-community.github.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - environmentrequests + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-etos-eiffel-community-github-io-v1alpha1-provider + failurePolicy: Fail + name: mprovider.kb.io + rules: + - apiGroups: + - etos.eiffel-community.github.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - providers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-etos-eiffel-community-github-io-v1alpha1-testrun + failurePolicy: Fail + name: mtestrun.kb.io + rules: + - apiGroups: + - etos.eiffel-community.github.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - testruns + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-etos-eiffel-community-github-io-v1alpha1-provider + failurePolicy: Fail + name: mprovider.kb.io + rules: + - apiGroups: + - etos.eiffel-community.github.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - providers + sideEffects: None +- 
admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-etos-eiffel-community-github-io-v1alpha1-testrun + failurePolicy: Fail + name: mtestrun.kb.io + rules: + - apiGroups: + - etos.eiffel-community.github.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - testruns + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 00000000..4b3736f6 --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: etos + app.kubernetes.io/managed-by: kustomize + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..8765be1b --- /dev/null +++ b/go.mod @@ -0,0 +1,97 @@ +module github.com/eiffel-community/etos + +go 1.22.0 + +toolchain go1.22.1 + +require ( + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 + k8s.io/api v0.30.1 + k8s.io/apiextensions-apiserver v0.30.1 + k8s.io/apimachinery v0.30.1 + k8s.io/client-go v0.30.1 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime v0.18.4 +) + +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.17.8 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + 
github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.18.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiserver v0.30.1 // indirect + k8s.io/component-base v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..9ce9c0dd --- /dev/null +++ b/go.sum @@ -0,0 +1,253 @@ +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod 
h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod 
h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod 
h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 
h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 00000000..57fe0176 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go new file mode 100644 index 00000000..a58daa89 --- /dev/null +++ b/internal/controller/cluster_controller.go @@ -0,0 +1,169 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
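The controllers added below gate every status write on the boolean returned by meta.SetStatusCondition, which is true only when the condition list actually changes (a new condition, or a different status/reason/message). A standalone sketch of that behaviour, not part of the patch; the literal "Ready" stands in for the StatusReady constant used in this package:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/meta"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Shows that meta.SetStatusCondition reports whether the condition list
    // changed; the reconcilers in this change only persist status when it does.
    func main() {
        var conditions []metav1.Condition
        ready := metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "Pending", Message: "Reconciliation started"}

        fmt.Println(meta.SetStatusCondition(&conditions, ready)) // true: condition was added
        fmt.Println(meta.SetStatusCondition(&conditions, ready)) // false: nothing changed, no status update needed

        ready.Status = metav1.ConditionTrue
        ready.Reason = "Ready"
        fmt.Println(meta.SetStatusCondition(&conditions, ready)) // true: status flipped, worth persisting
    }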
+ +package controller + +import ( + "context" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + "github.com/eiffel-community/etos/internal/etos" + "github.com/eiffel-community/etos/internal/extras" +) + +// ClusterReconciler reconciles a Cluster object +type ClusterReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=clusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=clusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environments,verbs=get;list;watch +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=testruns,verbs=get;list;watch;create;delete;deletecollection +// +kubebuilder:rbac:groups=*,resources=roles,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=ingresses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=services,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=*,resources=pods,verbs=get;list;watch +// +kubebuilder:rbac:groups=*,resources=jobs,verbs=get;list;watch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger = logger.WithValues("namespace", req.Namespace, "name", req.Name) + + // TODO: Logstash + + cluster := &etosv1alpha1.Cluster{} + err := r.Get(ctx, req.NamespacedName, cluster) + if err != nil { + if apierrors.IsNotFound(err) { + logger.Info("cluster not found. 
ignoring object") + return ctrl.Result{}, nil + } + logger.Error(err, "failed to get cluster") + return ctrl.Result{}, err + } + + eiffelbus := extras.NewRabbitMQDeployment(cluster.Spec.MessageBus.EiffelMessageBus, r.Scheme, r.Client) + if err := eiffelbus.Reconcile(ctx, cluster); err != nil { + if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Error reconciling the Eiffel event bus") + return r.update(ctx, cluster, metav1.ConditionFalse, err.Error()) + } + + etosbus := extras.NewMessageBusDeployment(cluster.Spec.MessageBus.ETOSMessageBus, r.Scheme, r.Client) + if err := etosbus.Reconcile(ctx, cluster); err != nil { + if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Error reconciling the ETOS message bus") + return r.update(ctx, cluster, metav1.ConditionFalse, err.Error()) + } + + mongodb := extras.NewMongoDBDeployment(cluster.Spec.EventRepository.Database, r.Scheme, r.Client) + if err := mongodb.Reconcile(ctx, cluster); err != nil { + if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Error reconciling the Eiffel event bus database") + return r.update(ctx, cluster, metav1.ConditionFalse, err.Error()) + } + + eventrepository := extras.NewEventRepositoryDeployment(&cluster.Spec.EventRepository, r.Scheme, r.Client, mongodb, eiffelbus.SecretName) + if err := eventrepository.Reconcile(ctx, cluster); err != nil { + if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Error reconciling the Eiffel event repository") + return r.update(ctx, cluster, metav1.ConditionFalse, err.Error()) + } + + etcd := etos.NewETCDDeployment(&cluster.Spec.Database, r.Scheme, r.Client) + if err := etcd.Reconcile(ctx, cluster); err != nil { + if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Error reconciling the ETOS database") + return r.update(ctx, cluster, metav1.ConditionFalse, err.Error()) + } + + etos := etos.NewETOSDeployment(cluster.Spec.ETOS, r.Scheme, r.Client, eiffelbus.SecretName, etosbus.SecretName) + if err := etos.Reconcile(ctx, cluster); err != nil { + if apierrors.IsConflict(err) || apierrors.IsNotFound(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Error reconciling ETOS") + return r.update(ctx, cluster, metav1.ConditionFalse, err.Error()) + } + + return r.update(ctx, cluster, metav1.ConditionTrue, "Cluster is up and running") +} + +// update will set the status condition and update the status of the ETOS cluster. +// if the update fails due to conflict the reconciliation will requeue after one second. +func (r *ClusterReconciler) update(ctx context.Context, cluster *etosv1alpha1.Cluster, status metav1.ConditionStatus, message string) (ctrl.Result, error) { + if meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{Type: StatusReady, Status: status, Reason: "Ready", Message: message}) { + if err := r.Status().Update(ctx, cluster); err != nil { + if apierrors.IsConflict(err) { + return ctrl.Result{RequeueAfter: time.Second}, nil + } + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). 
+ For(&etosv1alpha1.Cluster{}). + Owns(&appsv1.Deployment{}). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.Secret{}). + Owns(&corev1.Service{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&networkingv1.Ingress{}). + Complete(r) +} diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go new file mode 100644 index 00000000..bc21b7c8 --- /dev/null +++ b/internal/controller/cluster_controller_test.go @@ -0,0 +1,84 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +var _ = Describe("Cluster Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + cluster := &etosv1alpha1.Cluster{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Cluster") + err := k8sClient.Get(ctx, typeNamespacedName, cluster) + if err != nil && errors.IsNotFound(err) { + resource := &etosv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &etosv1alpha1.Cluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Cluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &ClusterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/environment_controller.go b/internal/controller/environment_controller.go new file mode 100644 index 00000000..b55ef64a --- /dev/null +++ b/internal/controller/environment_controller.go @@ -0,0 +1,324 @@ +// Copyright Axis Communications AB. 
+// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + "errors" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ref "k8s.io/client-go/tools/reference" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +const releaseFinalizer = "etos.eiffel-community.github.io/release" + +// EnvironmentReconciler reconciles a Environment object +type EnvironmentReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environments/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environments/finalizers,verbs=update +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers,verbs=get +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers/status,verbs=get + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *EnvironmentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + environment := &etosv1alpha1.Environment{} + err := r.Get(ctx, req.NamespacedName, environment) + if err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + // If the environment is considered 'Completed', it has been released. Check that the object is + // being deleted and contains the finalizer and remove the finalizer. + if environment.Status.CompletionTime != nil { + if !environment.ObjectMeta.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(environment, releaseFinalizer) { + controllerutil.RemoveFinalizer(environment, releaseFinalizer) + if err := r.Update(ctx, environment); err != nil { + if apierrors.IsConflict(err) { + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, err + } + } + } + return ctrl.Result{}, nil + } + + if err := r.reconcile(ctx, environment); err != nil { + if apierrors.IsConflict(err) { + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// reconcile an environment resource to its desired state. 
+func (r *EnvironmentReconciler) reconcile(ctx context.Context, environment *etosv1alpha1.Environment) error { + logger := log.FromContext(ctx) + + // Set initial statuses if not set. + if meta.FindStatusCondition(environment.Status.Conditions, StatusActive) == nil { + meta.SetStatusCondition(&environment.Status.Conditions, metav1.Condition{Status: metav1.ConditionTrue, Type: StatusActive, Message: "Actively being used", Reason: "Active"}) + return r.Status().Update(ctx, environment) + } + if environment.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(environment, releaseFinalizer) { + controllerutil.AddFinalizer(environment, releaseFinalizer) + return r.Update(ctx, environment) + } + } + + // Get active, finished and failed environment releasers. + releasers, err := jobStatus(ctx, r, environment.Namespace, environment.Name, EnvironmentOwnerKey) + if err != nil { + return err + } + + environment.Status.EnvironmentReleasers = nil + for _, activeReleaser := range releasers.activeJobs { + jobRef, err := ref.GetReference(r.Scheme, activeReleaser) + if err != nil { + logger.Error(err, "failed to make reference to active environment releaser", "releaser", activeReleaser) + continue + } + environment.Status.EnvironmentReleasers = append(environment.Status.EnvironmentReleasers, *jobRef) + } + if err := r.Status().Update(ctx, environment); err != nil { + return err + } + logger.V(1).Info("environment releaser count", "active", len(releasers.activeJobs), "successful", len(releasers.successfulJobs), "failed", len(releasers.failedJobs)) + + // TODO: Provider information does not exist in a deterministic way in the Environment resource + // so either we need to find the EnvironmentRequest or the Environment resource needs an update. + // if err := checkProviders(ctx, r, environment.Namespace, environment.Spec.Providers); err != nil { + // return err + // } + + if err := r.reconcileReleaser(ctx, releasers, environment); err != nil { + return err + } + + // There is no explicit retry here as it is not necessarily needed. If releasers is not successful + // then the Job will get deleted after a while. When that job is deleted, a reconcile is called for + // and the Environment will try to get released again. + if releasers.successful() { + environmentCondition := meta.FindStatusCondition(environment.Status.Conditions, StatusActive) + environment.Status.CompletionTime = &environmentCondition.LastTransitionTime + return r.Status().Update(ctx, environment) + } + + return nil +} + +// reconcileReleaser will check the status of environment releasers, create new ones if necessary. +func (r *EnvironmentReconciler) reconcileReleaser(ctx context.Context, releasers *jobs, environment *etosv1alpha1.Environment) error { + logger := log.FromContext(ctx) + + // Environment releaser failed, setting status. + if releasers.failed() { + releaser := releasers.failedJobs[0] // TODO: We should allow multiple releaser jobs in the future + result, err := terminationLog(ctx, r, releaser) + if err != nil { + result.Description = err.Error() + } + if result.Description == "" { + result.Description = "Failed to release an environment - Unknown error" + } + if meta.SetStatusCondition(&environment.Status.Conditions, metav1.Condition{Type: StatusActive, Status: metav1.ConditionFalse, Reason: "Failed", Message: result.Description}) { + return r.Status().Update(ctx, environment) + } + } + // Environment releaser successful, setting status. 
+ if releasers.successful() { + releaser := releasers.successfulJobs[0] // TODO: We should allow multiple releaser jobs in the future + result, err := terminationLog(ctx, r, releaser) + if err != nil { + result.Description = err.Error() + } + if result.Conclusion == ConclusionFailed { + if meta.SetStatusCondition(&environment.Status.Conditions, metav1.Condition{Type: StatusActive, Status: metav1.ConditionFalse, Reason: "Failed", Message: result.Description}) { + return r.Status().Update(ctx, environment) + } + } + if meta.SetStatusCondition(&environment.Status.Conditions, metav1.Condition{Type: StatusActive, Status: metav1.ConditionFalse, Reason: "Released", Message: result.Description}) { + for _, environmentProvider := range releasers.successfulJobs { + if err := r.Delete(ctx, environmentProvider, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } + } + return r.Status().Update(ctx, environment) + } + } + // Environment releasers active, setting status + if releasers.active() { + if meta.SetStatusCondition(&environment.Status.Conditions, metav1.Condition{Type: StatusActive, Status: metav1.ConditionFalse, Reason: "Releasing", Message: "Environment is being released"}) { + return r.Status().Update(ctx, environment) + } + } + // Environment is being deleted and no releaser is active, create an environment releaser + if releasers.empty() && !environment.ObjectMeta.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(environment, releaseFinalizer) { + logger.Info("Environment is being deleted, release it") + environmentRequest, err := r.environmentRequest(ctx, environment) + if err != nil { + return err + } + releaser := r.releaseJob(environment, environmentRequest) + if err := ctrl.SetControllerReference(environment, releaser, r.Scheme); err != nil { + return err + } + if err := r.Create(ctx, releaser); err != nil { + return err + } + } + } + return nil +} + +// environmentRequest returns the EnvironmentRequest that owns this environment. +func (r *EnvironmentReconciler) environmentRequest(ctx context.Context, environment *etosv1alpha1.Environment) (*etosv1alpha1.EnvironmentRequest, error) { + environmentRequestName := "" + for _, owner := range environment.GetOwnerReferences() { + if owner.Kind == "EnvironmentRequest" { + environmentRequestName = owner.Name + } + } + if environmentRequestName == "" { + return nil, errors.New("failed to find EnvironmentRequest owner") + } + environmentRequest := &etosv1alpha1.EnvironmentRequest{} + err := r.Get(ctx, types.NamespacedName{Name: environmentRequestName, Namespace: environment.Namespace}, environmentRequest) + if err != nil { + return nil, err + } + return environmentRequest, nil +} + +// releaseJob is the job definition for an environment releaser. 
+func (r EnvironmentReconciler) releaseJob(environment *etosv1alpha1.Environment, environmentRequest *etosv1alpha1.EnvironmentRequest) *batchv1.Job { + id := environment.Labels["etos.eiffel-community.github.io/id"] + cluster := environment.Labels["etos.eiffel-community.github.io/cluster"] + ttl := int32(300) + grace := int64(30) + backoff := int32(0) + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "etos.eiffel-community.github.io/id": id, + "etos.eiffel-community.github.io/sub-suite": environment.Name, + "etos.eiffel-community.github.io/cluster": cluster, + "app.kubernetes.io/name": "environment-releaser", + "app.kubernetes.io/part-of": "etos", + }, + Annotations: make(map[string]string), + Name: environment.Name, + Namespace: environment.Namespace, + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: &ttl, + BackoffLimit: &backoff, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: environment.Name, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: &grace, + ServiceAccountName: fmt.Sprintf("%s-provider", cluster), + RestartPolicy: "Never", + Containers: []corev1.Container{ + { + Name: environment.Name, + Image: environmentRequest.Spec.Image.Image, + ImagePullPolicy: environmentRequest.Spec.ImagePullPolicy, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("250m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + Command: []string{"python", "-u", "-m", "environment_provider.environment"}, + Args: []string{environment.Name}, + }, + }, + }, + }, + }, + } +} + +// registerOwnerIndexForJob will set an index of the jobs that an environment owns. +func (r *EnvironmentReconciler) registerOwnerIndexForJob(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &batchv1.Job{}, EnvironmentOwnerKey, func(rawObj client.Object) []string { + job := rawObj.(*batchv1.Job) + owner := metav1.GetControllerOf(job) + if owner == nil { + return nil + } + if owner.APIVersion != APIGroupVersionString || owner.Kind != "Environment" { + return nil + } + + return []string{owner.Name} + }); err != nil { + return err + } + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *EnvironmentReconciler) SetupWithManager(mgr ctrl.Manager) error { + // Register indexes for faster lookups + if err := r.registerOwnerIndexForJob(mgr); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&etosv1alpha1.Environment{}). + Owns(&batchv1.Job{}). + Complete(r) +} diff --git a/internal/controller/environment_controller_test.go b/internal/controller/environment_controller_test.go new file mode 100644 index 00000000..f09faad1 --- /dev/null +++ b/internal/controller/environment_controller_test.go @@ -0,0 +1,84 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +var _ = Describe("Environment Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + environment := &etosv1alpha1.Environment{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Environment") + err := k8sClient.Get(ctx, typeNamespacedName, environment) + if err != nil && errors.IsNotFound(err) { + resource := &etosv1alpha1.Environment{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &etosv1alpha1.Environment{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Environment") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &EnvironmentReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/environmentrequest_controller.go b/internal/controller/environmentrequest_controller.go new file mode 100644 index 00000000..f8370c92 --- /dev/null +++ b/internal/controller/environmentrequest_controller.go @@ -0,0 +1,411 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
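The Environment and EnvironmentRequest reconcilers both resolve their child Jobs through a field index registered on the Job's controller owner (registerOwnerIndexForJob above), via a jobStatus helper defined elsewhere in this change. A rough sketch of what such an index-backed lookup typically looks like with controller-runtime; listOwnedJobs and its arguments are illustrative, not code from this patch:

    package example

    import (
        "context"

        batchv1 "k8s.io/api/batch/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // listOwnedJobs lists the Jobs whose controller owner has the given name,
    // using a field index such as the one set up in registerOwnerIndexForJob.
    // Sketch only: ownerKey must match the key used when the index was registered.
    func listOwnedJobs(ctx context.Context, c client.Client, namespace, name, ownerKey string) ([]batchv1.Job, error) {
        var jobList batchv1.JobList
        if err := c.List(ctx, &jobList,
            client.InNamespace(namespace),
            client.MatchingFields{ownerKey: name},
        ); err != nil {
            return nil, err
        }
        return jobList.Items, nil
    }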
+ +package controller + +import ( + "context" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ref "k8s.io/client-go/tools/reference" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +// EnvironmentRequestReconciler reconciles a EnvironmentRequest object +type EnvironmentRequestReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environmentrequests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environmentrequests/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environmentrequests/finalizers,verbs=update +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers,verbs=get +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers/status,verbs=get + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *EnvironmentRequestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Get environment if exists. + environmentrequest := &etosv1alpha1.EnvironmentRequest{} + err := r.Get(ctx, req.NamespacedName, environmentrequest) + if err != nil { + if apierrors.IsNotFound(err) { + logger.Info("environmentrequest not found. ignoring object") + return ctrl.Result{}, nil + } + logger.Error(err, "failed to get environmentrequest") + return ctrl.Result{}, err + } + if environmentrequest.Status.CompletionTime != nil { + return ctrl.Result{}, nil + } + if err := r.reconcile(ctx, environmentrequest); err != nil { + if apierrors.IsConflict(err) { + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (r *EnvironmentRequestReconciler) reconcile(ctx context.Context, environmentrequest *etosv1alpha1.EnvironmentRequest) error { + logger := log.FromContext(ctx) + + // Set initial statuses if not set. + if meta.FindStatusCondition(environmentrequest.Status.Conditions, StatusReady) == nil { + logger.Info("Set ready status") + meta.SetStatusCondition(&environmentrequest.Status.Conditions, metav1.Condition{Status: metav1.ConditionFalse, Type: StatusReady, Message: "Reconciliation started", Reason: "Pending"}) + return r.Status().Update(ctx, environmentrequest) + } + + // Get active, finished and failed environment providers. + // TODO: Handle unique names for environment jobs. 
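+	// jobStatus (internal/controller/jobs.go) lists the jobs owned by this request through the
+	// EnvironmentRequestOwnerKey field index and partitions them into active, successful and failed sets.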
+ environmentProviders, err := jobStatus(ctx, r, environmentrequest.Namespace, environmentrequest.Name, EnvironmentRequestOwnerKey) + if err != nil { + return err + } + + environmentrequest.Status.EnvironmentProviders = nil + for _, activeProvider := range environmentProviders.activeJobs { + jobRef, err := ref.GetReference(r.Scheme, activeProvider) + if err != nil { + logger.Error(err, "failed to make reference to active environment provider", "provider", activeProvider) + continue + } + environmentrequest.Status.EnvironmentProviders = append(environmentrequest.Status.EnvironmentProviders, *jobRef) + } + if err := r.Status().Update(ctx, environmentrequest); err != nil { + return err + } + logger.V(1).Info("environment provider count", "active", len(environmentProviders.activeJobs), "successful", len(environmentProviders.successfulJobs), "failed", len(environmentProviders.failedJobs)) + + // Check providers availability + // TODO Update status + providers := etosv1alpha1.Providers{ + IUT: environmentrequest.Spec.Providers.IUT.ID, + ExecutionSpace: environmentrequest.Spec.Providers.ExecutionSpace.ID, + LogArea: environmentrequest.Spec.Providers.LogArea.ID, + } + if err := checkProviders(ctx, r, environmentrequest.Namespace, providers); err != nil { + return err + } + + // Reconcile environment provider + // TODO Update status + if err := r.reconcileEnvironmentProvider(ctx, environmentProviders, environmentrequest); err != nil { + return err + } + + if environmentProviders.failed() { + if environmentrequest.Status.CompletionTime == nil { + environmentCondition := meta.FindStatusCondition(environmentrequest.Status.Conditions, StatusReady) + environmentrequest.Status.CompletionTime = &environmentCondition.LastTransitionTime + return r.Status().Update(ctx, environmentrequest) + } + } + if environmentProviders.successful() { + if environmentrequest.Status.CompletionTime == nil { + environmentCondition := meta.FindStatusCondition(environmentrequest.Status.Conditions, StatusReady) + environmentrequest.Status.CompletionTime = &environmentCondition.LastTransitionTime + return r.Status().Update(ctx, environmentrequest) + } + } + + return nil +} + +// reconcileEnvironmentProvider will check the status of environment providers, create new ones if necessary. +func (r *EnvironmentRequestReconciler) reconcileEnvironmentProvider(ctx context.Context, providers *jobs, environmentrequest *etosv1alpha1.EnvironmentRequest) error { + // Environment provider failed, setting status. + if providers.failed() { + environmentProvider := providers.failedJobs[0] // TODO: We should support multiple providers in the future + result, err := terminationLog(ctx, r, environmentProvider) + if err != nil { + result.Description = err.Error() + } + if result.Description == "" { + result.Description = "Failed to provision an environment - Unknown error" + } + if meta.SetStatusCondition(&environmentrequest.Status.Conditions, metav1.Condition{Type: StatusReady, Status: metav1.ConditionFalse, Reason: "Failed", Message: result.Description}) { + return r.Status().Update(ctx, environmentrequest) + } + } + // Environment provider successful, setting status. 
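+	// Note that a provider job can exit successfully and still report a Failed conclusion in its
+	// termination log; that case is treated as a failure below.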
+	if providers.successful() {
+		environmentProvider := providers.successfulJobs[0] // TODO: We should support multiple providers in the future
+		result, err := terminationLog(ctx, r, environmentProvider)
+		if err != nil {
+			result.Description = err.Error()
+		}
+		if result.Conclusion == ConclusionFailed {
+			if meta.SetStatusCondition(&environmentrequest.Status.Conditions, metav1.Condition{Type: StatusReady, Status: metav1.ConditionFalse, Reason: "Failed", Message: result.Description}) {
+				return r.Status().Update(ctx, environmentrequest)
+			}
+		}
+		if meta.SetStatusCondition(&environmentrequest.Status.Conditions, metav1.Condition{Type: StatusReady, Status: metav1.ConditionTrue, Reason: "Done", Message: result.Description}) {
+			for _, environmentProvider := range providers.successfulJobs {
+				if err := r.Delete(ctx, environmentProvider, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil {
+					if !apierrors.IsNotFound(err) {
+						return err
+					}
+				}
+			}
+			return r.Status().Update(ctx, environmentrequest)
+		}
+	}
+	// Environment provider active, setting status
+	if providers.active() {
+		if meta.SetStatusCondition(&environmentrequest.Status.Conditions, metav1.Condition{Type: StatusReady, Status: metav1.ConditionFalse, Reason: "Running", Message: "Environment provider is running"}) {
+			return r.Status().Update(ctx, environmentrequest)
+		}
+	}
+	// No environment providers, create environment provider
+	if providers.empty() {
+		environmentProvider := r.environmentProviderJob(environmentrequest)
+		if err := ctrl.SetControllerReference(environmentrequest, environmentProvider, r.Scheme); err != nil {
+			return err
+		}
+		if err := r.Create(ctx, environmentProvider); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// environmentProviderJob is the job definition for an etos environment provider.
+func (r EnvironmentRequestReconciler) environmentProviderJob(environmentrequest *etosv1alpha1.EnvironmentRequest) *batchv1.Job {
+	ttl := int32(300)
+	grace := int64(30)
+	backoff := int32(0)
+	// TODO: Cluster might not be a part of the environment request.
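+	// If the cluster label is missing, the lookup below yields an empty string, which would produce
+	// a bare "-provider" service account name and malformed config map/secret names further down.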
+ cluster := environmentrequest.Labels["etos.eiffel-community.github.io/cluster"] + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "etos.eiffel-community.github.io/id": environmentrequest.Spec.Identifier, // TODO: omitempty + "etos.eiffel-community.github.io/cluster": cluster, + "app.kubernetes.io/name": "environment-provider", + "app.kubernetes.io/part-of": "etos", + }, + Annotations: make(map[string]string), + Name: environmentrequest.Name, + Namespace: environmentrequest.Namespace, + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: &ttl, + BackoffLimit: &backoff, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: environmentrequest.Name, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: &grace, + ServiceAccountName: fmt.Sprintf("%s-provider", cluster), + RestartPolicy: "Never", + Containers: []corev1.Container{ + { + Name: environmentrequest.Name, + Image: environmentrequest.Spec.Image.Image, + ImagePullPolicy: environmentrequest.Spec.ImagePullPolicy, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("250m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-rabbitmq", cluster), + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-messagebus", cluster), + }, + }, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "REQUEST", + Value: environmentrequest.Name, + }, + }, + }, + }, + }, + }, + }, + } +} + +// registerOwnerIndexForJob will set an index of the jobs that an environment request owns. +func (r *EnvironmentRequestReconciler) registerOwnerIndexForJob(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &batchv1.Job{}, EnvironmentRequestOwnerKey, func(rawObj client.Object) []string { + job := rawObj.(*batchv1.Job) + owner := metav1.GetControllerOf(job) + if owner == nil { + return nil + } + if owner.APIVersion != APIGroupVersionString || owner.Kind != "EnvironmentRequest" { + return nil + } + + return []string{owner.Name} + }); err != nil { + return err + } + return nil +} + +// findEnvironmentRequestsForIUTProvider will return reconciliation requests for each Provider object that an environment request has stored +// in its spec as IUT. This will cause reconciliations whenever a Provider gets updated, created, deleted etc. +func (r *EnvironmentRequestReconciler) findEnvironmentRequestsForIUTProvider(ctx context.Context, provider client.Object) []reconcile.Request { + return r.findEnvironmentRequestsForObject(ctx, iutProvider, provider) +} + +// findEnvironmentRequestsForIUTProvider will return reconciliation requests for each Provider object that an environment request has stored +// in its spec as execution space. This will cause reconciliations whenever a Provider gets updated, created, deleted etc. 
+func (r *EnvironmentRequestReconciler) findEnvironmentRequestsForExecutionSpaceProvider(ctx context.Context, provider client.Object) []reconcile.Request {
+	return r.findEnvironmentRequestsForObject(ctx, executionSpaceProvider, provider)
+}
+
+// findEnvironmentRequestsForLogAreaProvider will return reconciliation requests for each Provider object that an environment request has stored
+// in its spec as log area. This will cause reconciliations whenever a Provider gets updated, created, deleted etc.
+func (r *EnvironmentRequestReconciler) findEnvironmentRequestsForLogAreaProvider(ctx context.Context, provider client.Object) []reconcile.Request {
+	return r.findEnvironmentRequestsForObject(ctx, logAreaProvider, provider)
+}
+
+// FindEnvironmentRequestsForTestrun will return reconciliation requests for each testrun object that an environment request has stored
+// in its spec.
+func (r *EnvironmentRequestReconciler) FindEnvironmentRequestsForTestrun(ctx context.Context, testrun client.Object) []reconcile.Request {
+	return r.findEnvironmentRequestsForObject(ctx, ".spec.testrun", testrun)
+}
+
+// findEnvironmentRequestsForObject will find environment requests for a kubernetes object.
+func (r *EnvironmentRequestReconciler) findEnvironmentRequestsForObject(ctx context.Context, name string, obj client.Object) []reconcile.Request {
+	environmentRequestList := &etosv1alpha1.EnvironmentRequestList{}
+	listOps := &client.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector(name, obj.GetName()),
+		Namespace:     obj.GetNamespace(),
+	}
+	err := r.List(ctx, environmentRequestList, listOps)
+	if err != nil {
+		return []reconcile.Request{}
+	}
+
+	requests := make([]reconcile.Request, len(environmentRequestList.Items))
+	for i, item := range environmentRequestList.Items {
+		requests[i] = reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Name:      item.GetName(),
+				Namespace: item.GetNamespace(),
+			},
+		}
+	}
+	return requests
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *EnvironmentRequestReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	// Register indexes for faster lookups
+	if err := r.registerOwnerIndexForJob(mgr); err != nil {
+		return err
+	}
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.EnvironmentRequest{}, iutProvider, func(rawObj client.Object) []string {
+		environmentRequest := rawObj.(*etosv1alpha1.EnvironmentRequest)
+		return []string{environmentRequest.Spec.Providers.IUT.ID}
+	}); err != nil {
+		return err
+	}
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.EnvironmentRequest{}, logAreaProvider, func(rawObj client.Object) []string {
+		environmentRequest := rawObj.(*etosv1alpha1.EnvironmentRequest)
+		return []string{environmentRequest.Spec.Providers.LogArea.ID}
+	}); err != nil {
+		return err
+	}
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.EnvironmentRequest{}, executionSpaceProvider, func(rawObj client.Object) []string {
+		environmentRequest := rawObj.(*etosv1alpha1.EnvironmentRequest)
+		return []string{environmentRequest.Spec.Providers.ExecutionSpace.ID}
+	}); err != nil {
+		return err
+	}
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&etosv1alpha1.EnvironmentRequest{}).
+		Owns(&batchv1.Job{}).
+		Watches(
+			&etosv1alpha1.Provider{},
+			handler.TypedEnqueueRequestsFromMapFunc(r.findEnvironmentRequestsForIUTProvider),
+			builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
+		).
+ Watches( + &etosv1alpha1.Provider{}, + handler.TypedEnqueueRequestsFromMapFunc(r.findEnvironmentRequestsForLogAreaProvider), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches( + &etosv1alpha1.Provider{}, + handler.TypedEnqueueRequestsFromMapFunc(r.findEnvironmentRequestsForExecutionSpaceProvider), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Complete(r) +} diff --git a/internal/controller/environmentrequest_controller_test.go b/internal/controller/environmentrequest_controller_test.go new file mode 100644 index 00000000..a712853f --- /dev/null +++ b/internal/controller/environmentrequest_controller_test.go @@ -0,0 +1,84 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +var _ = Describe("EnvironmentRequest Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + environmentrequest := &etosv1alpha1.EnvironmentRequest{} + + BeforeEach(func() { + By("creating the custom resource for the Kind EnvironmentRequest") + err := k8sClient.Get(ctx, typeNamespacedName, environmentrequest) + if err != nil && errors.IsNotFound(err) { + resource := &etosv1alpha1.EnvironmentRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &etosv1alpha1.EnvironmentRequest{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance EnvironmentRequest") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &EnvironmentRequestReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
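+			// A minimal sketch of such an assertion (assuming the first reconciliation sets the
+			// Ready condition on the request; meta is k8s.io/apimachinery/pkg/api/meta, which is
+			// not yet imported in this file):
+			//
+			//	updated := &etosv1alpha1.EnvironmentRequest{}
+			//	Expect(k8sClient.Get(ctx, typeNamespacedName, updated)).To(Succeed())
+			//	Expect(meta.FindStatusCondition(updated.Status.Conditions, StatusReady)).NotTo(BeNil())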
+ }) + }) +}) diff --git a/internal/controller/jobs.go b/internal/controller/jobs.go new file mode 100644 index 00000000..17a5db08 --- /dev/null +++ b/internal/controller/jobs.go @@ -0,0 +1,156 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type jobs struct { + activeJobs []*batchv1.Job + successfulJobs []*batchv1.Job + failedJobs []*batchv1.Job +} + +func (j jobs) active() bool { + return len(j.successfulJobs) == 0 && len(j.failedJobs) == 0 && len(j.activeJobs) > 0 +} + +func (j jobs) failed() bool { + return len(j.failedJobs) > 0 && len(j.activeJobs) == 0 +} + +func (j jobs) successful() bool { + return len(j.successfulJobs) > 0 && len(j.activeJobs) == 0 +} + +func (j jobs) empty() bool { + return len(j.successfulJobs) == 0 && len(j.failedJobs) == 0 && len(j.activeJobs) == 0 +} + +// jobStatus creates a job struct with jobs that are active, failed or successful. +func jobStatus(ctx context.Context, c client.Reader, namespace string, name string, ownerKey string) (*jobs, error) { + var active []*batchv1.Job + var successful []*batchv1.Job + var failed []*batchv1.Job + var joblist batchv1.JobList + + if err := c.List(ctx, &joblist, client.InNamespace(namespace), client.MatchingFields{ownerKey: name}); err != nil { + return &jobs{}, err + } + + for i, job := range joblist.Items { + _, finishedType := isJobFinished(job) + switch finishedType { + case "": // Ongoing + active = append(active, &joblist.Items[i]) + case batchv1.JobFailed: + failed = append(failed, &joblist.Items[i]) + case batchv1.JobComplete: + successful = append(successful, &joblist.Items[i]) + } + } + return &jobs{activeJobs: active, failedJobs: failed, successfulJobs: successful}, nil +} + +// isJobFinished checks if a job has status Complete or Failed. +func isJobFinished(job batchv1.Job) (bool, batchv1.JobConditionType) { + if IsJobStatusConditionPresentAndEqual(job.Status.Conditions, batchv1.JobComplete, corev1.ConditionTrue) { + return true, batchv1.JobComplete + } + if IsJobStatusConditionPresentAndEqual(job.Status.Conditions, batchv1.JobFailed, corev1.ConditionTrue) { + return true, batchv1.JobFailed + } + return false, "" +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. 
+func IsJobStatusConditionPresentAndEqual(conditions []batchv1.JobCondition, conditionType batchv1.JobConditionType, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +type ( + Conclusion string + Verdict string +) + +const ( + ConclusionSuccessful Conclusion = "Successful" + ConclusionFailed Conclusion = "Failed" + ConclusionAborted Conclusion = "Aborted" + ConclusionTimedOut Conclusion = "TimedOut" + ConclusionInconclusive Conclusion = "Inconclusive" +) + +const ( + VerdictPassed Verdict = "Passed" + VerdictFailed Verdict = "Failed" + VerdictInconclusive Verdict = "Inconclusive" + VerdictNone Verdict = "None" +) + +// Result describes the status and result of an ETOS job +type Result struct { + Conclusion Conclusion `json:"conclusion"` + Verdict Verdict `json:"verdict,omitempty"` + Description string `json:"description,omitempty"` +} + +// terminationLog reads the termination-log part of the ESR pod and returns it. +func terminationLog(ctx context.Context, c client.Reader, job *batchv1.Job) (*Result, error) { + logger := log.FromContext(ctx) + var pods corev1.PodList + if err := c.List(ctx, &pods, client.InNamespace(job.Namespace), client.MatchingLabels{"job-name": job.Name}); err != nil { + logger.Error(err, fmt.Sprintf("could not list pods for job %s", job.Name)) + return &Result{Conclusion: ConclusionFailed}, err + } + if len(pods.Items) == 0 { + return &Result{Conclusion: ConclusionFailed}, fmt.Errorf("no pods found for job %s", job.Name) + } + if len(pods.Items) > 1 { + // TODO: check specific + logger.Info("found more than 1 pod active. Will only check termination-log for the first one", "pod", pods.Items[0]) + } + pod := pods.Items[0] + + for _, status := range pod.Status.ContainerStatuses { + if status.Name == job.Name { + if status.State.Terminated == nil { + return &Result{Conclusion: ConclusionFailed}, errors.New("could not read termination log from pod") + } + var result Result + if err := json.Unmarshal([]byte(status.State.Terminated.Message), &result); err != nil { + logger.Error(err, "failed to unmarshal termination log to a result struct") + return &Result{Conclusion: ConclusionFailed, Description: status.State.Terminated.Message}, nil + } + return &result, nil + } + } + return &Result{Conclusion: ConclusionFailed}, errors.New("found no container status for pod") +} diff --git a/internal/controller/provider_controller.go b/internal/controller/provider_controller.go new file mode 100644 index 00000000..d4b713cf --- /dev/null +++ b/internal/controller/provider_controller.go @@ -0,0 +1,105 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "context" + "fmt" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +// ProviderReconciler reconciles a Provider object +type ProviderReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *ProviderReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + provider := &etosv1alpha1.Provider{} + err := r.Get(ctx, req.NamespacedName, provider) + if err != nil { + if apierrors.IsNotFound(err) { + logger.Info("provider not found. ignoring object") + return ctrl.Result{}, nil + } + logger.Error(err, "failed to get provider") + return ctrl.Result{}, err + } + logger.V(2).Info("Checking availability of provider", "provider", req.NamespacedName) + + // TODO: Schedule checks instead of doing it every time something happens + + // We don't check the availability of JSONTas as it is not yet running as a service we can check. 
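+	// For all other providers, availability means the healthcheck endpoint answers with
+	// 204 No Content; any other status code, or a transport error, marks the provider
+	// unavailable and schedules a new check after IntervalSeconds.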
+ if provider.Spec.JSONTas == nil { + logger.V(2).Info("Healthcheck", "endpoint", fmt.Sprintf("%s/%s", provider.Spec.Host, provider.Spec.Healthcheck.Endpoint)) + resp, err := http.Get(fmt.Sprintf("%s/%s", provider.Spec.Host, provider.Spec.Healthcheck.Endpoint)) + if err != nil { + meta.SetStatusCondition(&provider.Status.Conditions, metav1.Condition{Type: StatusAvailable, Status: metav1.ConditionFalse, Reason: "Error", Message: "Could not communicate with host"}) + if err = r.Status().Update(ctx, provider); err != nil { + logger.Error(err, "failed to update provider status") + return ctrl.Result{}, err + } + logger.Info("Provider did not respond", "provider", req.NamespacedName) + return ctrl.Result{RequeueAfter: time.Duration(provider.Spec.Healthcheck.IntervalSeconds) * time.Second}, nil + } + if resp.StatusCode != 204 { + meta.SetStatusCondition(&provider.Status.Conditions, metav1.Condition{Type: StatusAvailable, Status: metav1.ConditionFalse, Reason: "Error", Message: fmt.Sprintf("Wrong status code (%d) from health check endpoint", resp.StatusCode)}) + if err = r.Status().Update(ctx, provider); err != nil { + logger.Error(err, "failed to update provider status") + return ctrl.Result{}, err + } + logger.Info("Provider responded with a bad status code", "provider", req.NamespacedName, "status", resp.StatusCode) + return ctrl.Result{RequeueAfter: time.Duration(provider.Spec.Healthcheck.IntervalSeconds) * time.Second}, nil + } + } + meta.SetStatusCondition(&provider.Status.Conditions, metav1.Condition{Type: StatusAvailable, Status: metav1.ConditionTrue, Reason: "OK", Message: "Provider is up and running"}) + if err = r.Status().Update(ctx, provider); err != nil { + logger.Error(err, "failed to update provider status") + return ctrl.Result{}, err + } + logger.V(2).Info("Provider is available", "provider", req.NamespacedName) + return ctrl.Result{RequeueAfter: time.Duration(provider.Spec.Healthcheck.IntervalSeconds) * time.Second}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ProviderReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&etosv1alpha1.Provider{}). + Complete(r) +} diff --git a/internal/controller/provider_controller_test.go b/internal/controller/provider_controller_test.go new file mode 100644 index 00000000..899f9c6f --- /dev/null +++ b/internal/controller/provider_controller_test.go @@ -0,0 +1,84 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +var _ = Describe("Provider Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + provider := &etosv1alpha1.Provider{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Provider") + err := k8sClient.Get(ctx, typeNamespacedName, provider) + if err != nil && errors.IsNotFound(err) { + resource := &etosv1alpha1.Provider{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &etosv1alpha1.Provider{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Provider") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &ProviderReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/providers.go b/internal/controller/providers.go new file mode 100644 index 00000000..a78f1a96 --- /dev/null +++ b/internal/controller/providers.go @@ -0,0 +1,56 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package controller + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// checkProviders checks if all providers for this environment are available. 
+func checkProviders(ctx context.Context, c client.Reader, namespace string, providers etosv1alpha1.Providers) error { + err := checkProvider(ctx, c, providers.IUT, namespace, &etosv1alpha1.Provider{}) + if err != nil { + return err + } + err = checkProvider(ctx, c, providers.ExecutionSpace, namespace, &etosv1alpha1.Provider{}) + if err != nil { + return err + } + err = checkProvider(ctx, c, providers.LogArea, namespace, &etosv1alpha1.Provider{}) + if err != nil { + return err + } + return nil +} + +// checkProvider checks if the provider condition 'Available' is set to True. +func checkProvider(ctx context.Context, c client.Reader, name string, namespace string, provider *etosv1alpha1.Provider) error { + err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, provider) + if err != nil { + return err + } + if meta.IsStatusConditionPresentAndEqual(provider.Status.Conditions, StatusAvailable, metav1.ConditionTrue) { + return nil + } + return fmt.Errorf("Provider '%s' does not have a status field", name) +} diff --git a/internal/controller/status.go b/internal/controller/status.go new file mode 100644 index 00000000..78be61d0 --- /dev/null +++ b/internal/controller/status.go @@ -0,0 +1,10 @@ +package controller + +const ( + StatusAvailable = "Available" + StatusReady = "Ready" + StatusFailed = "Failed" + StatusActive = "Active" + StatusEnvironment = "Environment" + StatusSuiteRunner = "SuiteRunner" +) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go new file mode 100644 index 00000000..0ef759cc --- /dev/null +++ b/internal/controller/suite_test.go @@ -0,0 +1,91 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
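+//
+// The suite runs against envtest: BeforeSuite below boots a local kube-apiserver and etcd
+// from the binaries under bin/k8s instead of talking to a real cluster.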
+ +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment +) + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.30.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = etosv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/controller/testrun_controller.go b/internal/controller/testrun_controller.go new file mode 100644 index 00000000..cf918b01 --- /dev/null +++ b/internal/controller/testrun_controller.go @@ -0,0 +1,897 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package controller
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	ref "k8s.io/client-go/tools/reference"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1"
+)
+
+// TODO: Move Environment, EnvironmentRequestOwnerKey
+var (
+	TestRunOwnerKey            = ".metadata.controller.suiterunner"
+	EnvironmentRequestOwnerKey = ".metadata.controller.environmentrequest"
+	EnvironmentOwnerKey        = ".metadata.controller.environment"
+	APIGroupVersionString      = etosv1alpha1.GroupVersion.String()
+	iutProvider                = ".spec.providers.iut"
+	executionSpaceProvider     = ".spec.providers.executionSpace"
+	logAreaProvider            = ".spec.providers.logarea"
+)
+
+// TestRunReconciler reconciles a TestRun object
+type TestRunReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+	Clock
+}
+
+/*
+We'll mock out the clock to make it easier to jump around in time while testing,
+the "real" clock just calls `time.Now`.
+*/
+type realClock struct{}
+
+func (_ realClock) Now() time.Time { return time.Now() }
+
+// Clock knows how to get the current time.
+// It can be used to fake out timing for testing.
+type Clock interface {
+	Now() time.Time
+}
+
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=testruns,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=testruns/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=testruns/finalizers,verbs=update
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environments,verbs=get;watch
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environments/status,verbs=get
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environmentrequests,verbs=get;watch
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=environmentrequests/status,verbs=get
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers,verbs=get;watch
+// +kubebuilder:rbac:groups=etos.eiffel-community.github.io,resources=providers/status,verbs=get
+// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;delete
+// +kubebuilder:rbac:groups=batch,resources=jobs/status,verbs=get
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *TestRunReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger = logger.WithValues("namespace", req.Namespace, "name", req.Name) + + testrun := &etosv1alpha1.TestRun{} + if err := r.Get(ctx, req.NamespacedName, testrun); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("Testrun not found, exiting") + return ctrl.Result{}, nil + } + logger.Error(err, "Error getting testrun") + return ctrl.Result{}, err + } + if testrun.Status.CompletionTime != nil { + testrunCondition := meta.FindStatusCondition(testrun.Status.Conditions, StatusActive) + var retention *metav1.Duration + if testrunCondition.Reason == "Successful" { + retention = testrun.Spec.Retention.Success + } else { + retention = testrun.Spec.Retention.Failure + } + if retention == nil { + logger.Info("No retention set, ignoring") + return ctrl.Result{}, nil + } + if testrun.Status.CompletionTime.Add(retention.Duration).Before(time.Now()) { + logger.Info(fmt.Sprintf("Testrun TTL(%s) reached, delete", retention.Duration)) + if err := r.Delete(ctx, testrun, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "Failed deletion. Ignoring any errors and won't retry") + } + } + } else { + next := testrun.Status.CompletionTime.Add(retention.Duration).Sub(r.Now()) + logger.Info(fmt.Sprintf("Testrun queued for deletion in %s", next)) + return ctrl.Result{RequeueAfter: next}, nil + } + return ctrl.Result{}, nil + } + + if err := r.reconcile(ctx, testrun); err != nil { + if apierrors.IsConflict(err) { + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Reconciliation failed for testrun", "namespace", req.Namespace, "name", req.Name) + return r.update(ctx, testrun, metav1.ConditionFalse, err.Error()) + } + + return ctrl.Result{}, nil +} + +func (r *TestRunReconciler) reconcile(ctx context.Context, testrun *etosv1alpha1.TestRun) error { + logger := log.FromContext(ctx) + + // Set initial statuses if not set. + if meta.FindStatusCondition(testrun.Status.Conditions, StatusActive) == nil { + meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Status: metav1.ConditionFalse, Type: StatusActive, Message: "Reconciliation started", Reason: "Pending"}) + return r.Status().Update(ctx, testrun) + } + if meta.FindStatusCondition(testrun.Status.Conditions, StatusSuiteRunner) == nil { + meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Status: metav1.ConditionFalse, Type: StatusSuiteRunner, Message: "Reconciliation started", Reason: "Pending"}) + return r.Status().Update(ctx, testrun) + } + if meta.FindStatusCondition(testrun.Status.Conditions, StatusEnvironment) == nil { + meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Status: metav1.ConditionFalse, Type: StatusEnvironment, Message: "Reconciliation started", Reason: "Pending"}) + return r.Status().Update(ctx, testrun) + } + + // Get active, finished and failed suite runners. 
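+	// Only jobs owned by this testrun are listed, using the TestRunOwnerKey field index, and
+	// grouped into active, successful and failed sets.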
+ suiteRunners, err := jobStatus(ctx, r, testrun.Namespace, testrun.Name, TestRunOwnerKey) + if err != nil { + return err + } + + // Add active suite runners to testrun status + testrun.Status.SuiteRunners = nil + for _, activeSuiteRunner := range suiteRunners.activeJobs { + jobRef, err := ref.GetReference(r.Scheme, activeSuiteRunner) + if err != nil { + logger.Error(err, "failed to make reference to active suite runner", "suiterunner", activeSuiteRunner) + continue + } + testrun.Status.SuiteRunners = append(testrun.Status.SuiteRunners, *jobRef) + } + if err := r.Status().Update(ctx, testrun); err != nil { + return err + } + logger.V(1).Info("suite runner count", "active", len(suiteRunners.activeJobs), "successful", len(suiteRunners.successfulJobs), "failed", len(suiteRunners.failedJobs)) + + // Check providers availability + if err := checkProviders(ctx, r, testrun.Namespace, testrun.Spec.Providers); err != nil { + return err + } + + // Create environment request + err, exit := r.reconcileEnvironmentRequest(ctx, testrun) + if err != nil { + return err + } + if exit { + return nil + } + + // Check environment + if err := r.checkEnvironment(ctx, testrun); err != nil { + return err + } + + // Reconcile suite runners + if err := r.reconcileSuiteRunner(ctx, suiteRunners, testrun); err != nil { + return err + } + + // Set testrun status + if suiteRunners.active() { + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusActive, Status: metav1.ConditionTrue, Reason: "Active", Message: "Waiting for suite runners to finish"}) { + return r.Status().Update(ctx, testrun) + } + } + if suiteRunners.failed() { + if err := r.complete(ctx, testrun, "Failed", "Suite runners failed to finish"); err != nil { + return err + } + } + if suiteRunners.successful() { + if err := r.complete(ctx, testrun, "Successful", "Suite runners finished successfully"); err != nil { + return err + } + } + + return nil +} + +// complete sets the completion time and active status on a testrun. +func (r *TestRunReconciler) complete(ctx context.Context, testrun *etosv1alpha1.TestRun, reason, message string) error { + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusActive, Status: metav1.ConditionFalse, Reason: reason, Message: message}) { + testrunCondition := meta.FindStatusCondition(testrun.Status.Conditions, StatusActive) + testrun.Status.CompletionTime = &testrunCondition.LastTransitionTime + return r.Status().Update(ctx, testrun) + } + return nil +} + +// reconcileEnvironmentRequest will check the status of environment requests, create new ones if necessary. 
+func (r *TestRunReconciler) reconcileEnvironmentRequest(ctx context.Context, testrun *etosv1alpha1.TestRun) (error, bool) { + logger := log.FromContext(ctx) + var environmentRequestList etosv1alpha1.EnvironmentRequestList + if err := r.List(ctx, &environmentRequestList, client.InNamespace(testrun.Namespace), client.MatchingFields{TestRunOwnerKey: testrun.Name}); err != nil { + if !apierrors.IsNotFound(err) { + return err, true + } + } + testrun.Status.EnvironmentRequests = nil + for _, request := range environmentRequestList.Items { + reqRef, err := ref.GetReference(r.Scheme, &request) + if err != nil { + logger.Error(err, "failed to make reference to active environment request", "environmentrequest", request) + continue + } + testrun.Status.EnvironmentRequests = append(testrun.Status.EnvironmentRequests, *reqRef) + } + for _, suite := range testrun.Spec.Suites { + found := false + for _, request := range environmentRequestList.Items { + if request.Spec.Name == suite.Name { + found = true + } + } + if !found { + request := r.environmentRequest(testrun, suite) + if err := ctrl.SetControllerReference(testrun, request, r.Scheme); err != nil { + return err, true + } + logger.Info("Creating a new environment request", "request", request.Name) + if err := r.Create(ctx, request); err != nil { + return err, true + } + } + } + + for _, environmentRequest := range environmentRequestList.Items { + condition := meta.FindStatusCondition(environmentRequest.Status.Conditions, StatusReady) + if condition != nil && condition.Status == metav1.ConditionFalse && condition.Reason == "Failed" { + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusEnvironment, Status: metav1.ConditionFalse, Reason: "Failed", Message: "Failed to create environment for test"}) { + return r.Status().Update(ctx, testrun), true + } + if err := r.complete(ctx, testrun, "Failed", condition.Message); err != nil { + return err, true + } + return nil, true + } else if condition != nil && condition.Status == metav1.ConditionFalse { + logger.Info("Environment request is not finished") + } + } + return nil, false +} + +// reconcileSuiteRunner will check the status of suite runners, create new ones if necessary. +func (r *TestRunReconciler) reconcileSuiteRunner(ctx context.Context, suiteRunners *jobs, testrun *etosv1alpha1.TestRun) error { + logger := log.FromContext(ctx) + // Suite runners failed, setting status. + if suiteRunners.failed() { + suiteRunner := suiteRunners.failedJobs[0] // TODO + result, err := terminationLog(ctx, r, suiteRunner) + if err != nil { + result.Description = err.Error() + } + logger.Info("Suite runner result", "verdict", result.Verdict, "conclusion", result.Conclusion, "message", result.Description) + if result.Verdict == "" { + result.Verdict = VerdictNone + } + testrun.Status.Verdict = string(result.Verdict) + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusSuiteRunner, Status: metav1.ConditionFalse, Reason: "Failed", Message: result.Description}) { + return r.Status().Update(ctx, testrun) + } + } + // Suite runners successful, setting status. 
+ if suiteRunners.successful() { + suiteRunner := suiteRunners.successfulJobs[0] // TODO + result, err := terminationLog(ctx, r, suiteRunner) + if err != nil { + result.Description = err.Error() + } + logger.Info("Suite runner result", "verdict", result.Verdict, "conclusion", result.Conclusion, "message", result.Description) + if result.Verdict == "" { + result.Verdict = VerdictNone + } + testrun.Status.Verdict = string(result.Verdict) + if result.Conclusion == ConclusionFailed { + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusSuiteRunner, Status: metav1.ConditionFalse, Reason: "Failed", Message: result.Description}) { + return r.Status().Update(ctx, testrun) + } + } + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusSuiteRunner, Status: metav1.ConditionFalse, Reason: "Done", Message: "Suite runner finished"}) { + for _, suiteRunner := range suiteRunners.successfulJobs { + // TODO: Deletion should probably be done outside of this function + if err := r.Delete(ctx, suiteRunner, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } + if err = r.deleteEnvironmentRequests(ctx, testrun); err != nil { + return err + } + } + return r.Status().Update(ctx, testrun) + } + } + // Suite runners active, setting status + if suiteRunners.active() { + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusSuiteRunner, Status: metav1.ConditionTrue, Reason: "Running", Message: "Suite runner is running"}) { + testrun.Status.Verdict = string(VerdictNone) + logger.Info("Setting suiterunner (active) true") + return r.Status().Update(ctx, testrun) + } + } + // No suite runners, create suite runner + if suiteRunners.empty() { + tercc, err := json.Marshal(testrun.Spec.Suites) + if err != nil { + return err + } + suiteRunner := r.suiteRunnerJob(tercc, testrun) + if err := ctrl.SetControllerReference(testrun, suiteRunner, r.Scheme); err != nil { + return err + } + if err := r.Create(ctx, suiteRunner); err != nil { + return err + } + } + return nil +} + +// deleteEnvironmentRequests will delete all environment requests that are a part of a testrun. +func (r *TestRunReconciler) deleteEnvironmentRequests(ctx context.Context, testrun *etosv1alpha1.TestRun) error { + var environmentRequestList etosv1alpha1.EnvironmentRequestList + if err := r.List(ctx, &environmentRequestList, client.InNamespace(testrun.Namespace), client.MatchingFields{TestRunOwnerKey: testrun.Name}); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } + for _, environmentRequest := range environmentRequestList.Items { + if err := r.Delete(ctx, &environmentRequest, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } + } + return nil +} + +// update will set the status condition and update the status of the ETOS testrun. +// if the update fails due to conflict the reconciliation will requeue. +func (r *TestRunReconciler) update(ctx context.Context, testrun *etosv1alpha1.TestRun, status metav1.ConditionStatus, message string) (ctrl.Result, error) { + // TODO: Verify this function. 
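+	// meta.SetStatusCondition reports true only when the condition actually changed, so the
+	// status update (and its conflict handling) is skipped when nothing changed.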
+ if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusActive, Status: status, Reason: "Active", Message: message}) { + if err := r.Status().Update(ctx, testrun); err != nil { + if apierrors.IsConflict(err) { + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +// checkEnvironment will check the status of the environment used for test. +func (r *TestRunReconciler) checkEnvironment(ctx context.Context, testrun *etosv1alpha1.TestRun) error { + logger := log.FromContext(ctx) + logger.Info("Checking environment") + var environments etosv1alpha1.EnvironmentList + if err := r.List(ctx, &environments, client.InNamespace(testrun.Namespace), client.MatchingFields{TestRunOwnerKey: testrun.Name}); err != nil { + return err + } + logger.Info("Listed environments", "environments", len(environments.Items)) + // TODO: this only checks one environment, not all of them if there are many + if len(environments.Items) > 0 { + if meta.SetStatusCondition(&testrun.Status.Conditions, metav1.Condition{Type: StatusEnvironment, Status: metav1.ConditionTrue, Reason: "Ready", Message: "Environment ready"}) { + logger.Info("Set condition") + return r.Status().Update(ctx, testrun) + } + } + return nil +} + +// environmentRequest is the definition for an environment request. +func (r TestRunReconciler) environmentRequest(testrun *etosv1alpha1.TestRun, suite etosv1alpha1.Suite) *etosv1alpha1.EnvironmentRequest { + return &etosv1alpha1.EnvironmentRequest{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "etos.eiffel-community.github.io/id": testrun.Spec.ID, + "etos.eiffel-community.github.io/cluster": testrun.Spec.Cluster, + "app.kubernetes.io/name": "suite-runner", + "app.kubernetes.io/part-of": "etos", + }, + Annotations: make(map[string]string), + GenerateName: fmt.Sprintf("%s-", testrun.Name), + Namespace: testrun.Namespace, + }, + Spec: etosv1alpha1.EnvironmentRequestSpec{ + ID: string(uuid.NewUUID()), + Name: suite.Name, + Identifier: testrun.Spec.ID, + Artifact: testrun.Spec.Artifact, + Identity: testrun.Spec.Identity, + MinimumAmount: 1, + MaximumAmount: len(suite.Tests), + Dataset: suite.Dataset, + Providers: etosv1alpha1.EnvironmentProviders{ + IUT: etosv1alpha1.IutProvider{ + ID: testrun.Spec.Providers.IUT, + }, + ExecutionSpace: etosv1alpha1.ExecutionSpaceProvider{ + ID: testrun.Spec.Providers.ExecutionSpace, + TestRunner: testrun.Spec.TestRunner.Version, + }, + LogArea: etosv1alpha1.LogAreaProvider{ + ID: testrun.Spec.Providers.LogArea, + }, + }, + Splitter: etosv1alpha1.Splitter{ + Tests: suite.Tests, + }, + Image: testrun.Spec.EnvironmentProvider.Image, + }, + } +} + +// suiteRunnerJob is the job definition for an etos suite runner. 
+func (r TestRunReconciler) suiteRunnerJob(tercc []byte, testrun *etosv1alpha1.TestRun) *batchv1.Job { + ttl := int32(300) + grace := int64(30) + backoff := int32(0) + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + // TODO: remove these two first ones + "id": testrun.Spec.ID, + "app": "suite-runner", + "etos.eiffel-community.github.io/id": testrun.Spec.ID, + "app.kubernetes.io/name": "suite-runner", + "app.kubernetes.io/part-of": "etos", + }, + Annotations: make(map[string]string), + Name: testrun.Name, + Namespace: testrun.Namespace, + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: &ttl, + BackoffLimit: &backoff, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: testrun.Name, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "kubexit", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "graveyard", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}, + }, + }, + }, + TerminationGracePeriodSeconds: &grace, + ServiceAccountName: fmt.Sprintf("%s-provider", testrun.Spec.Cluster), + RestartPolicy: "Never", + InitContainers: []corev1.Container{ + { + Name: "kubexit", + Image: "karlkfi/kubexit:latest", + Command: []string{"cp", "/bin/kubexit", "/kubexit/kubexit"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "kubexit", + MountPath: "/kubexit", + }, + }, + }, + { + Name: "create-queue", + Image: testrun.Spec.LogListener.Image.Image, + ImagePullPolicy: testrun.Spec.LogListener.ImagePullPolicy, + Command: []string{"python", "-u", "-m", "create_queue"}, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: testrun.Spec.Cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-messagebus", testrun.Spec.Cluster), + }, + }, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "IDENTIFIER", + Value: testrun.Spec.ID, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: testrun.Name, + Image: testrun.Spec.SuiteRunner.Image.Image, + ImagePullPolicy: testrun.Spec.SuiteRunner.ImagePullPolicy, + Command: []string{"/kubexit/kubexit"}, + Args: []string{"python", "-u", "-m", "etos_suite_runner"}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("250m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: testrun.Spec.Cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: testrun.Spec.Cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-rabbitmq", testrun.Spec.Cluster), + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-messagebus", testrun.Spec.Cluster), + }, + }, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "TERCC", + Value: string(tercc), + }, + { + Name: "ARTIFACT", + Value: testrun.Spec.Artifact, + }, + { + Name: "IDENTITY", + 
Value: testrun.Spec.Identity, + }, + { + Name: "TESTRUN", + Value: testrun.Name, + }, + { + Name: "IDENTIFIER", + Value: testrun.Spec.ID, + }, + { + Name: "KUBEXIT_NAME", + Value: "esr", + }, + { + Name: "KUBEXIT_GRAVEYARD", + Value: "/graveyard", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "graveyard", + MountPath: "/graveyard", + }, + { + Name: "kubexit", + MountPath: "/kubexit", + }, + }, + }, + { + Name: "etos-log-listener", + Image: testrun.Spec.LogListener.Image.Image, + ImagePullPolicy: testrun.Spec.LogListener.ImagePullPolicy, + Command: []string{"/kubexit/kubexit"}, + Args: []string{"python", "-u", "-m", "log_listener"}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("250m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: testrun.Spec.Cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: testrun.Spec.Cluster, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: fmt.Sprintf("%s-messagebus", testrun.Spec.Cluster), + }, + }, + }, + }, + Env: []corev1.EnvVar{ + { + Name: "IDENTIFIER", + Value: testrun.Spec.ID, + }, + { + Name: "KUBEXIT_NAME", + Value: "log_listener", + }, + { + Name: "KUBEXIT_GRAVE_PERIOD", + Value: "400s", + }, + { + Name: "KUBEXIT_GRAVEYARD", + Value: "/graveyard", + }, + { + Name: "KUBEXIT_DEATH_DEPS", + Value: "esr", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "graveyard", + MountPath: "/graveyard", + }, + { + Name: "kubexit", + MountPath: "/kubexit", + }, + }, + }, + }, + }, + }, + }, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *TestRunReconciler) SetupWithManager(mgr ctrl.Manager) error { + // Set up real clock, since we are not in a test + if r.Clock == nil { + r.Clock = realClock{} + } + + // Register indexes for faster lookups + if err := r.registerOwnerIndexForJob(mgr); err != nil { + return err + } + if err := r.registerOwnerIndexForEnvironment(mgr); err != nil { + return err + } + if err := r.registerOwnerIndexForEnvironmentRequest(mgr); err != nil { + return err + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.TestRun{}, iutProvider, func(rawObj client.Object) []string { + testrun := rawObj.(*etosv1alpha1.TestRun) + return []string{testrun.Spec.Providers.IUT} + }); err != nil { + return err + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.TestRun{}, logAreaProvider, func(rawObj client.Object) []string { + testrun := rawObj.(*etosv1alpha1.TestRun) + return []string{testrun.Spec.Providers.LogArea} + }); err != nil { + return err + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.TestRun{}, executionSpaceProvider, func(rawObj client.Object) []string { + testrun := rawObj.(*etosv1alpha1.TestRun) + return []string{testrun.Spec.Providers.ExecutionSpace} + }); err != nil { + return err + } + return ctrl.NewControllerManagedBy(mgr). + For(&etosv1alpha1.TestRun{}). + Owns(&batchv1.Job{}). + Owns(&etosv1alpha1.EnvironmentRequest{}). 
+ Watches( + &etosv1alpha1.Provider{}, + handler.TypedEnqueueRequestsFromMapFunc(r.findTestrunsForIUTProvider), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches( + &etosv1alpha1.Provider{}, + handler.TypedEnqueueRequestsFromMapFunc(r.findTestrunsForExecutionSpaceProvider), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches( + &etosv1alpha1.Provider{}, + handler.TypedEnqueueRequestsFromMapFunc(r.findTestrunsForLogAreaProvider), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches( + &etosv1alpha1.Environment{}, + handler.TypedEnqueueRequestsFromMapFunc(r.findTestrunsForEnvironment), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Complete(r) +} + +// registerOwnerIndexForJob will set an index of the suite runner jobs that a testrun owns. +func (r *TestRunReconciler) registerOwnerIndexForJob(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &batchv1.Job{}, TestRunOwnerKey, func(rawObj client.Object) []string { + job := rawObj.(*batchv1.Job) + owner := metav1.GetControllerOf(job) + if owner == nil { + return nil + } + if owner.APIVersion != APIGroupVersionString || owner.Kind != "TestRun" { + return nil + } + + return []string{owner.Name} + }); err != nil { + return err + } + return nil +} + +// registerOwnerIndexForEnvironment will set an index of the environments that a testrun owns. +func (r *TestRunReconciler) registerOwnerIndexForEnvironment(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.Environment{}, TestRunOwnerKey, func(rawObj client.Object) []string { + environment := rawObj.(*etosv1alpha1.Environment) + owner := metav1.GetControllerOf(environment) + if owner == nil { + return nil + } + if owner.APIVersion != APIGroupVersionString || owner.Kind != "TestRun" { + return nil + } + return []string{owner.Name} + }); err != nil { + return err + } + return nil +} + +// registerOwnerIndexForEnvironmentRequest will set an index of the environment requests that a testrun owns. +func (r *TestRunReconciler) registerOwnerIndexForEnvironmentRequest(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &etosv1alpha1.EnvironmentRequest{}, TestRunOwnerKey, func(rawObj client.Object) []string { + environmentRequest := rawObj.(*etosv1alpha1.EnvironmentRequest) + owner := metav1.GetControllerOf(environmentRequest) + if owner == nil { + return nil + } + if owner.APIVersion != APIGroupVersionString || owner.Kind != "TestRun" { + return nil + } + + return []string{owner.Name} + }); err != nil { + return err + } + return nil +} + +// findTestrunsForIUTProvider will return reconciliation requests for each Provider object that a testrun has stored +// in its spec as IUT. This will cause reconciliations whenever a Provider gets updated, created, deleted etc. +func (r *TestRunReconciler) findTestrunsForIUTProvider(ctx context.Context, provider client.Object) []reconcile.Request { + return r.findTestrunsForProvider(ctx, iutProvider, provider) +} + +// findTestrunsForIUTProvider will return reconciliation requests for each Provider object that a testrun has stored +// in its spec as execution space. This will cause reconciliations whenever a Provider gets updated, created, deleted etc. 
+func (r *TestRunReconciler) findTestrunsForExecutionSpaceProvider(ctx context.Context, provider client.Object) []reconcile.Request {
+ return r.findTestrunsForProvider(ctx, executionSpaceProvider, provider)
+}
+
+// findTestrunsForLogAreaProvider will return reconciliation requests for each Provider object that a testrun has stored
+// in its spec as log area. This will cause reconciliations whenever a Provider gets updated, created, deleted etc.
+func (r *TestRunReconciler) findTestrunsForLogAreaProvider(ctx context.Context, provider client.Object) []reconcile.Request {
+ return r.findTestrunsForProvider(ctx, logAreaProvider, provider)
+}
+
+// findTestrunsForProvider will find testruns for a providerName.
+func (r *TestRunReconciler) findTestrunsForProvider(ctx context.Context, providerName string, provider client.Object) []reconcile.Request {
+ testrunList := &etosv1alpha1.TestRunList{}
+ listOps := &client.ListOptions{
+ FieldSelector: fields.OneTermEqualSelector(providerName, provider.GetName()),
+ Namespace: provider.GetNamespace(),
+ }
+ err := r.List(ctx, testrunList, listOps)
+ if err != nil {
+ return []reconcile.Request{}
+ }
+
+ requests := make([]reconcile.Request, len(testrunList.Items))
+ for i, item := range testrunList.Items {
+ requests[i] = reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: item.GetName(),
+ Namespace: item.GetNamespace(),
+ },
+ }
+ }
+ return requests
+}
+
+// findTestrunsForEnvironment will return reconciliation requests for Environment objects with a specific testrun as
+// owner.
+func (r *TestRunReconciler) findTestrunsForEnvironment(ctx context.Context, environment client.Object) []reconcile.Request {
+ // TODO: Since we are setting controller to false when creating Environment in the environment provider,
+ // we need to find the TestRun owner in another way. This approach is less robust, but since we are going
+ // to create environments from the controllers later we will be able to bring this more in line with
+ // how registerOwnerIndexForJob does it.
+ refs := environment.GetOwnerReferences()
+ for i := range refs {
+ if refs[i].APIVersion == APIGroupVersionString && refs[i].Kind == "TestRun" {
+ return []reconcile.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Name: refs[i].Name,
+ Namespace: environment.GetNamespace(),
+ },
+ },
+ }
+ }
+ }
+ return nil
+}
diff --git a/internal/controller/testrun_controller_test.go b/internal/controller/testrun_controller_test.go
new file mode 100644
index 00000000..1fe1130b
--- /dev/null
+++ b/internal/controller/testrun_controller_test.go
@@ -0,0 +1,84 @@
+// Copyright Axis Communications AB.
+//
+// For a full list of individual contributors, please see the commit history.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+ "context"
+
+ . "github.com/onsi/ginkgo/v2"
+ .
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" +) + +var _ = Describe("TestRun Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + testrun := &etosv1alpha1.TestRun{} + + BeforeEach(func() { + By("creating the custom resource for the Kind TestRun") + err := k8sClient.Get(ctx, typeNamespacedName, testrun) + if err != nil && errors.IsNotFound(err) { + resource := &etosv1alpha1.TestRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &etosv1alpha1.TestRun{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance TestRun") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &TestRunReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/etos/api/api.go b/internal/etos/api/api.go new file mode 100644 index 00000000..055732f2 --- /dev/null +++ b/internal/etos/api/api.go @@ -0,0 +1,434 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + ApiServicePort int32 = 80 + apiPort int32 = 8080 +) + +type ETOSApiDeployment struct { + etosv1alpha1.ETOSAPI + client.Client + Scheme *runtime.Scheme + rabbitmqSecret string + messagebusSecret string + configmap string +} + +// NewETOSApiDeployment will create a new ETOS API reconciler. +func NewETOSApiDeployment(spec etosv1alpha1.ETOSAPI, scheme *runtime.Scheme, client client.Client, rabbitmqSecret string, messagebusSecret string, configmap string) *ETOSApiDeployment { + return &ETOSApiDeployment{spec, client, scheme, rabbitmqSecret, messagebusSecret, configmap} +} + +// Reconcile will reconcile the ETOS API to its expected state. +func (r *ETOSApiDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + var err error + name := fmt.Sprintf("%s-etos-api", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + + _, err = r.reconcileDeployment(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileSecret(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileRole(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileServiceAccount(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileRolebinding(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(ctx, namespacedName, cluster) + if err != nil { + return err + } + return nil +} + +// reconcileDeployment will reconcile the ETOS API deployment to its expected state. +func (r *ETOSApiDeployment) reconcileDeployment(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*appsv1.Deployment, error) { + target := r.deployment(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + deployment := &appsv1.Deployment{} + if err := r.Get(ctx, name, deployment); err != nil { + if !apierrors.IsNotFound(err) { + return deployment, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Spec, deployment.Spec) { + return deployment, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(deployment)) +} + +// reconcileSecret will reconcile the ETOS API service account secret to its expected state. 
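+// Like the other reconcile helpers in this file it follows a create-or-patch flow: get the
+// live object, create the target if it is missing, otherwise patch it with a strategic merge
+// against the live copy; the deployment and secret helpers additionally skip the patch when
+// DeepDerivative reports no drift. A minimal sketch of that flow (illustrative only, using
+// the secret as the example object):
+//
+//    existing := &corev1.Secret{}
+//    if err := r.Get(ctx, name, existing); err != nil {
+//        if !apierrors.IsNotFound(err) {
+//            return existing, err
+//        }
+//        return target, r.Create(ctx, target)
+//    }
+//    return target, r.Patch(ctx, target, client.StrategicMergeFrom(existing))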
+func (r *ETOSApiDeployment) reconcileSecret(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Secret, error) { + target := r.secret(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + secret := &corev1.Secret{} + if err := r.Get(ctx, name, secret); err != nil { + if !apierrors.IsNotFound(err) { + return secret, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Data, secret.Data) { + return secret, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(secret)) +} + +// reconcileRole will reconcile the ETOS API service account role to its expected state. +func (r *ETOSApiDeployment) reconcileRole(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*rbacv1.Role, error) { + labelName := name.Name + name.Name = fmt.Sprintf("%s:sa:esr-handler", name.Name) + + target := r.role(name, labelName) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + role := &rbacv1.Role{} + if err := r.Get(ctx, name, role); err != nil { + if !apierrors.IsNotFound(err) { + return role, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(role)) +} + +// reconcileServiceAccount will reconcile the ETOS API service account to its expected state. +func (r *ETOSApiDeployment) reconcileServiceAccount(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.ServiceAccount, error) { + target := r.serviceaccount(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + serviceaccount := &corev1.ServiceAccount{} + if err := r.Get(ctx, name, serviceaccount); err != nil { + if !apierrors.IsNotFound(err) { + return serviceaccount, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(serviceaccount)) +} + +// reconcileRolebinding will reconcile the ETOS API service account role binding to its expected state. +func (r *ETOSApiDeployment) reconcileRolebinding(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*rbacv1.RoleBinding, error) { + target := r.rolebinding(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + rolebinding := &rbacv1.RoleBinding{} + if err := r.Get(ctx, name, rolebinding); err != nil { + if !apierrors.IsNotFound(err) { + return rolebinding, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(rolebinding)) +} + +// reconcileService will reconcile the ETOS API service to its expected state. 
+func (r *ETOSApiDeployment) reconcileService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// secret creates a secret resource definition for the ETOS API. +func (r *ETOSApiDeployment) secret(name types.NamespacedName) *corev1.Secret { + meta := r.meta(name) + meta.Annotations["kubernetes.io/service-account.name"] = name.Name + name.Name = fmt.Sprintf("%s-token", name.Name) + return &corev1.Secret{ + ObjectMeta: meta, + Type: corev1.SecretTypeServiceAccountToken, + } +} + +// role creates a role resource definition for the ETOS API. +func (r *ETOSApiDeployment) role(name types.NamespacedName, labelName string) *rbacv1.Role { + meta := r.meta(types.NamespacedName{Name: labelName, Namespace: name.Namespace}) + meta.Name = name.Name + meta.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "true" + return &rbacv1.Role{ + ObjectMeta: meta, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "batch", + }, + Resources: []string{ + "jobs", + }, + Verbs: []string{ + "get", "delete", "list", "watch", + }, + }, + { + APIGroups: []string{"etos.eiffel-community.github.io"}, + Resources: []string{ + "testruns", + }, + Verbs: []string{ + "create", "get", "delete", "list", "watch", "deletecollection", + }, + }, + { + APIGroups: []string{"etos.eiffel-community.github.io"}, + Resources: []string{ + "environments", + }, + Verbs: []string{ + "get", "list", "watch", + }, + }, + { + APIGroups: []string{""}, + Resources: []string{ + "pods", + }, + Verbs: []string{ + "get", "list", "watch", + }, + }, + }, + } +} + +// serviceaccount creates a service account resource definition for the ETOS API. +func (r *ETOSApiDeployment) serviceaccount(name types.NamespacedName) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: r.meta(name), + } +} + +// rolebinding creates a rolebinding resource definition for the ETOS API. +func (r *ETOSApiDeployment) rolebinding(name types.NamespacedName) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: r.meta(name), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.SchemeGroupVersion.Group, + Kind: "Role", + Name: fmt.Sprintf("%s:sa:esr-handler", name.Name), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: name.Name, + }, + }, + } +} + +// deployment creates a deployment resource definition for the ETOS API. +func (r *ETOSApiDeployment) deployment(name types.NamespacedName) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: r.meta(name), + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "api", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + ServiceAccountName: name.Name, + Containers: []corev1.Container{r.container(name)}, + }, + }, + }, + } +} + +// service creates a service resource definition for the ETOS API. 
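+// It publishes ApiServicePort (80) and forwards traffic to the container port named "http"
+// (8080), so with default cluster DNS an in-cluster client would, for example, reach the
+// API's health endpoint at:
+//
+//    http://<cluster-name>-etos-api.<namespace>/selftest/ping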
+func (r *ETOSApiDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "api", + }, + }, + } +} + +// container creates the container resource for the ETOS API deployment. +func (r *ETOSApiDeployment) container(name types.NamespacedName) corev1.Container { + probe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/selftest/ping", + Port: intstr.FromString("http"), + Scheme: "HTTP", + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 3, + } + return corev1.Container{ + Name: name.Name, + Image: r.Image.Image, + ImagePullPolicy: r.ImagePullPolicy, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: apiPort, + Protocol: "TCP", + }, + }, + LivenessProbe: probe, + ReadinessProbe: probe, + EnvFrom: r.environment(), + } +} + +// environment creates the environment resource for the ETOS API deployment. +func (r *ETOSApiDeployment) environment() []corev1.EnvFromSource { + return []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: r.rabbitmqSecret, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: r.messagebusSecret, + }, + }, + }, + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: r.configmap, + }, + }, + }, + } +} + +// meta creates the common meta resource for the ETOS API deployment. +func (r *ETOSApiDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "api", + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// ports creates the port resource for the ETOS API service. +func (r *ETOSApiDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: ApiServicePort, Name: "http", Protocol: "TCP", TargetPort: intstr.FromString("http")}, + } +} diff --git a/internal/etos/api/logarea.go b/internal/etos/api/logarea.go new file mode 100644 index 00000000..bd209a77 --- /dev/null +++ b/internal/etos/api/logarea.go @@ -0,0 +1,274 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + logAreaPort int32 = 8080 + LogAreaServicePort int32 = 80 +) + +type ETOSLogAreaDeployment struct { + etosv1alpha1.ETOSLogArea + client.Client + Scheme *runtime.Scheme +} + +// NewETOSLogAreaDeployment will create a new ETOS logarea reconciler. +func NewETOSLogAreaDeployment(spec etosv1alpha1.ETOSLogArea, scheme *runtime.Scheme, client client.Client) *ETOSLogAreaDeployment { + return &ETOSLogAreaDeployment{spec, client, scheme} +} + +// Reconcile will reconcile the ETOS logarea to its expected state. +func (r *ETOSLogAreaDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + var err error + name := fmt.Sprintf("%s-etos-logarea", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + + _, err = r.reconcileDeployment(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileServiceAccount(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(ctx, namespacedName, cluster) + if err != nil { + return err + } + return nil +} + +// reconcileDeployment will reconcile the ETOS logarea deployment to its expected state. +func (r *ETOSLogAreaDeployment) reconcileDeployment(ctx context.Context, name types.NamespacedName, cluster *etosv1alpha1.Cluster) (*appsv1.Deployment, error) { + target := r.deployment(name, cluster) + if err := ctrl.SetControllerReference(cluster, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + deployment := &appsv1.Deployment{} + if err := r.Get(ctx, name, deployment); err != nil { + if !apierrors.IsNotFound(err) { + return deployment, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Spec, deployment.Spec) { + return deployment, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(deployment)) +} + +// reconcileServiceAccount will reconcile the ETOS logarea service account to its expected state. +func (r *ETOSLogAreaDeployment) reconcileServiceAccount(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.ServiceAccount, error) { + target := r.serviceaccount(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + serviceaccount := &corev1.ServiceAccount{} + if err := r.Get(ctx, name, serviceaccount); err != nil { + if !apierrors.IsNotFound(err) { + return serviceaccount, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(serviceaccount)) +} + +// reconcileService will reconcile the ETOS logarea service to its expected state. 
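+// The log area API is served under the /logarea prefix: the container strips it internally
+// (STRIP_PREFIX=/logarea) and its probes hit /logarea/v1alpha/selftest/ping, so an ingress
+// or gateway in front of the cluster is expected to route /logarea traffic to this service.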
+func (r *ETOSLogAreaDeployment) reconcileService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// serviceaccount creates a service account resource definition for the ETOS logarea. +func (r *ETOSLogAreaDeployment) serviceaccount(name types.NamespacedName) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: r.meta(name), + } +} + +// deployment creates a deployment resource definition for the ETOS logarea. +func (r *ETOSLogAreaDeployment) deployment(name types.NamespacedName, cluster *etosv1alpha1.Cluster) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: r.meta(name), + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "logarea", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + ServiceAccountName: name.Name, + Containers: []corev1.Container{r.container(name, cluster)}, + }, + }, + }, + } +} + +// service creates a service resource definition for the ETOS logarea. +func (r *ETOSLogAreaDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "logarea", + }, + }, + } +} + +// container creates the container resource for the ETOS logarea deployment. +func (r *ETOSLogAreaDeployment) container(name types.NamespacedName, cluster *etosv1alpha1.Cluster) corev1.Container { + probe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/logarea/v1alpha/selftest/ping", + Port: intstr.FromString("http"), + Scheme: "HTTP", + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 3, + } + return corev1.Container{ + Name: name.Name, + Image: r.Image.Image, + ImagePullPolicy: r.ImagePullPolicy, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("64Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: logAreaPort, + Protocol: "TCP", + }, + }, + LivenessProbe: probe, + ReadinessProbe: probe, + Env: r.environment(cluster), + EnvFrom: []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + }, + }, + }, + } +} + +// environment creates the environment resource for the ETOS logarea deployment. 
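+// ETOS_ETCD_HOST and ETOS_ETCD_PORT are taken from cluster.Spec.Database.Etcd; when the
+// operator deploys etcd itself, the database reconciler patches that host to point at the
+// "<cluster-name>-etcd-client" service, so the log area talks to the cluster's own etcd
+// instance.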
+func (r *ETOSLogAreaDeployment) environment(cluster *etosv1alpha1.Cluster) []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "SERVICE_HOST", + Value: "0.0.0.0", + }, + { + Name: "ETOS_ETCD_HOST", + Value: cluster.Spec.Database.Etcd.Host, + }, + { + Name: "ETOS_ETCD_PORT", + Value: cluster.Spec.Database.Etcd.Port, + }, + { + Name: "STRIP_PREFIX", + Value: "/logarea", + }, + } +} + +// meta creates the common meta resource for the ETOS logarea deployment. +func (r *ETOSLogAreaDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "logarea", + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// ports creates the port resource for the ETOS logarea service. +func (r *ETOSLogAreaDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: LogAreaServicePort, Name: "http", Protocol: "TCP", TargetPort: intstr.FromString("http")}, + } +} diff --git a/internal/etos/api/sse.go b/internal/etos/api/sse.go new file mode 100644 index 00000000..b47e6151 --- /dev/null +++ b/internal/etos/api/sse.go @@ -0,0 +1,361 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + ssePort int32 = 8080 + SSEServicePort int32 = 80 +) + +type ETOSSSEDeployment struct { + etosv1alpha1.ETOSSSE + client.Client + Scheme *runtime.Scheme +} + +// NewETOSSSEDeployment will create a new ETOS SSE reconciler. +func NewETOSSSEDeployment(spec etosv1alpha1.ETOSSSE, scheme *runtime.Scheme, client client.Client) *ETOSSSEDeployment { + return &ETOSSSEDeployment{spec, client, scheme} +} + +// Reconcile will reconcile the ETOS SSE service to its expected state. 
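+// It creates the deployment first and then the RBAC objects (an esr-reader Role, a service
+// account and a rolebinding) that let the SSE pod read suite-runner jobs and pods, before
+// finally exposing the deployment through a service.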
+func (r *ETOSSSEDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + var err error + name := fmt.Sprintf("%s-etos-sse", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + + _, err = r.reconcileDeployment(ctx, namespacedName, cluster) + if err != nil { + return err + } + + _, err = r.reconcileRole(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileServiceAccount(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileRolebinding(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(ctx, namespacedName, cluster) + if err != nil { + return err + } + return nil +} + +// reconcileDeployment will reconcile the ETOS SSE deployment to its expected state. +func (r *ETOSSSEDeployment) reconcileDeployment(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*appsv1.Deployment, error) { + target := r.deployment(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + deployment := &appsv1.Deployment{} + if err := r.Get(ctx, name, deployment); err != nil { + if !apierrors.IsNotFound(err) { + return deployment, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Spec, deployment.Spec) { + return deployment, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(deployment)) +} + +// reconcileRole will reconcile the ETOS SSE service account role to its expected state. +func (r *ETOSSSEDeployment) reconcileRole(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*rbacv1.Role, error) { + labelName := name.Name + name.Name = fmt.Sprintf("%s:sa:esr-reader", name.Name) + + target := r.role(name, labelName) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + role := &rbacv1.Role{} + if err := r.Get(ctx, name, role); err != nil { + if !apierrors.IsNotFound(err) { + return role, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(role)) +} + +// reconcileServiceAccount will reconcile the ETOS SSE service account to its expected state. +func (r *ETOSSSEDeployment) reconcileServiceAccount(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.ServiceAccount, error) { + target := r.serviceaccount(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + serviceaccount := &corev1.ServiceAccount{} + if err := r.Get(ctx, name, serviceaccount); err != nil { + if !apierrors.IsNotFound(err) { + return serviceaccount, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(serviceaccount)) +} + +// reconcileRolebinding will reconcile the ETOS SSE service account rolebinding to its expected state. 
+func (r *ETOSSSEDeployment) reconcileRolebinding(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*rbacv1.RoleBinding, error) { + target := r.rolebinding(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + rolebinding := &rbacv1.RoleBinding{} + if err := r.Get(ctx, name, rolebinding); err != nil { + if !apierrors.IsNotFound(err) { + return rolebinding, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(rolebinding)) +} + +// reconcileService will reconcile the ETOS SSE service to its expected state. +func (r *ETOSSSEDeployment) reconcileService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// role creates a role resource definition for the ETOS SSE service. +func (r *ETOSSSEDeployment) role(name types.NamespacedName, labelName string) *rbacv1.Role { + meta := r.meta(types.NamespacedName{Name: labelName, Namespace: name.Namespace}) + meta.Name = name.Name + meta.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "true" + return &rbacv1.Role{ + ObjectMeta: meta, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "batch", + }, + Resources: []string{ + "jobs", + }, + Verbs: []string{ + "get", "list", "watch", + }, + }, + { + APIGroups: []string{""}, + Resources: []string{ + "pods", + }, + Verbs: []string{ + "get", "list", "watch", + }, + }, + }, + } +} + +// serviceaccount creates a serviceaccount resource definition for the ETOS SSE service. +func (r *ETOSSSEDeployment) serviceaccount(name types.NamespacedName) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: r.meta(name), + } +} + +// rolebinding creates a rolebinding resource definition for the ETOS SSE service. +func (r *ETOSSSEDeployment) rolebinding(name types.NamespacedName) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: r.meta(name), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.SchemeGroupVersion.Group, + Kind: "Role", + Name: fmt.Sprintf("%s:sa:esr-reader", name.Name), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: name.Name, + }, + }, + } +} + +// deployment creates a deployment resource definition for the ETOS SSE service. +func (r *ETOSSSEDeployment) deployment(name types.NamespacedName) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: r.meta(name), + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "sse", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + ServiceAccountName: name.Name, + Containers: []corev1.Container{r.container(name)}, + }, + }, + }, + } +} + +// service creates a service resource definition for the ETOS SSE service. 
+func (r *ETOSSSEDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "sse", + }, + }, + } +} + +// container creates a container resource definition for the ETOS SSE deployment. +func (r *ETOSSSEDeployment) container(name types.NamespacedName) corev1.Container { + probe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/sse/v1alpha/selftest/ping", + Port: intstr.FromString("http"), + Scheme: "HTTP", + }, + }, + TimeoutSeconds: 1, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 3, + } + return corev1.Container{ + Name: name.Name, + Image: r.Image.Image, + ImagePullPolicy: r.ImagePullPolicy, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: ssePort, + Protocol: "TCP", + }, + }, + LivenessProbe: probe, + ReadinessProbe: probe, + Env: r.environment(), + } +} + +// environment creates an environment resource definition for the ETOS SSE deployment. +func (r *ETOSSSEDeployment) environment() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "SERVICE_HOST", + Value: "0.0.0.0", + }, + { + Name: "STRIP_PREFIX", + Value: "/sse", + }, + } +} + +// meta creates a common meta resource definition for the ETOS SSE service. +func (r *ETOSSSEDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + "app.kubernetes.io/component": "sse", + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// ports creates a service port resource definition for the ETOS SSE service. +func (r *ETOSSSEDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: SSEServicePort, Name: "http", Protocol: "TCP", TargetPort: intstr.FromString("http")}, + } +} diff --git a/internal/etos/database.go b/internal/etos/database.go new file mode 100644 index 00000000..980556af --- /dev/null +++ b/internal/etos/database.go @@ -0,0 +1,327 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etos + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + etcdClientPort int32 = 2379 + etcdPeerPort int32 = 2380 + etcdReplicas int32 = 3 +) + +type ETCDDeployment struct { + *etosv1alpha1.Database + client.Client + Scheme *runtime.Scheme +} + +// NewETCDDeployment will create a new ETCD reconciler. +func NewETCDDeployment(spec *etosv1alpha1.Database, scheme *runtime.Scheme, client client.Client) *ETCDDeployment { + return &ETCDDeployment{spec, client, scheme} +} + +// Reconcile will reconcile ETCD to its expected state. +func (r *ETCDDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + logger := log.FromContext(ctx) + name := fmt.Sprintf("%s-etcd", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + if r.Deploy { + logger.Info("Patching host when deploying etcd", "host", fmt.Sprintf("%s-client", name)) + r.Etcd.Host = fmt.Sprintf("%s-client", name) + } + + _, err := r.reconcileStatefulset(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileClientService(ctx, namespacedName, cluster) + if err != nil { + return err + } + + return nil +} + +// reconcileStatefulset will reconcile the ETCD statefulset to its expected state. +func (r *ETCDDeployment) reconcileStatefulset(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*appsv1.StatefulSet, error) { + target := r.statefulset(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + etcd := &appsv1.StatefulSet{} + if err := r.Get(ctx, name, etcd); err != nil { + if !apierrors.IsNotFound(err) { + return etcd, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, etcd) + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(etcd)) +} + +// reconcileService will reconcile the ETCD service to its expected state. +func (r *ETCDDeployment) reconcileService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.headlessService(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, service) + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// reconcileClientService will reconcile the ETCD client service to its expected state. 
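+// The client service is named "<cluster-name>-etcd-client" and exposes the etcd client port
+// (2379); it is the host that Reconcile patches into Etcd.Host when etcd is deployed by the
+// operator, so clients end up with an endpoint along the lines of:
+//
+//    http://<cluster-name>-etcd-client:2379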
+func (r *ETCDDeployment) reconcileClientService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + labelName := name.Name + name.Name = fmt.Sprintf("%s-client", name.Name) + target := r.service(name, labelName) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, service) + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// statefulset creates a statefulset resource definition for ETCD. +func (r *ETCDDeployment) statefulset(name types.NamespacedName) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: r.meta(name), + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + }, + }, + ServiceName: name.Name, + Replicas: &etcdReplicas, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{r.volumeClaim(name)}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{r.volume(name)}, + Containers: []corev1.Container{r.container(name)}, + }, + }, + }, + } +} + +// headlessService creates a headless service resource definition for ETCD. +func (r *ETCDDeployment) headlessService(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + ClusterIP: "None", + Selector: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + }, + }, + } +} + +// service creates a service resource definition for ETCD. +func (r *ETCDDeployment) service(name types.NamespacedName, labelName string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.clientPorts(), + Selector: map[string]string{ + "app.kubernetes.io/name": labelName, + "app.kubernetes.io/part-of": "etos", + }, + }, + } +} + +// meta creates a common meta resource definition for ETCD. +func (r *ETCDDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + "app.kubernetes.io/part-of": "etos", + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// volumeClaim creates a volume claim resource definition for the ETCD statefulset. +func (r *ETCDDeployment) volumeClaim(name types.NamespacedName) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-data", name.Name), + Namespace: name.Namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")}, + }, + }, + } +} + +// volume creates a volume resource definition for the ETCD statefulset. 
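+// It references the "<name>-data" PersistentVolumeClaim created by volumeClaim; the etcd
+// container mounts it at /var/run/etcd, which matches the --data-dir /var/run/etcd/default.etcd
+// flag passed to etcd.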
+func (r *ETCDDeployment) volume(name types.NamespacedName) corev1.Volume { + return corev1.Volume{ + Name: fmt.Sprintf("%s-data", name.Name), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("%s-data", name.Name), + }, + }, + } +} + +// container creates a container resource definition for the ETCD statefulset. +func (r *ETCDDeployment) container(name types.NamespacedName) corev1.Container { + // Example peers with a cluster name of 'cluster-sample': + // cluster-sample-etcd-0=http://cluster-sample-etcd-0.cluster-sample-etcd:2380,cluster-sample-etcd-1=http://cluster-sample-etcd-1.cluster-sample-etcd:2380,cluster-sample-etcd-2=http://cluster-sample-etcd-2.cluster-sample-etcd:2380 + peers := fmt.Sprintf("%[1]s-0=http://%[1]s-0.%[1]s:%[2]d,%[1]s-1=http://%[1]s-1.%[1]s:%[2]d,%[1]s-2=http://%[1]s-2.%[1]s:%[2]d", name.Name, etcdPeerPort) + return corev1.Container{ + Name: name.Name, + Image: "quay.io/coreos/etcd:latest", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("512Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("512Mi"), + corev1.ResourceCPU: resource.MustParse("200m"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: fmt.Sprintf("%s-data", name.Name), + MountPath: "/var/run/etcd", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "client", + ContainerPort: etcdClientPort, + Protocol: "TCP", + }, + { + Name: "peer", + ContainerPort: etcdPeerPort, + Protocol: "TCP", + }, + }, + Env: []corev1.EnvVar{ + { + Name: "PEERS", + Value: peers, + }, + { + Name: "SUBDOMAIN", + Value: name.Name, + }, + }, + Command: []string{ + "/bin/sh", + "-c", + `exec etcd --name ${HOSTNAME} \ + --listen-peer-urls http://0.0.0.0:2380 \ + --listen-client-urls http://0.0.0.0:2379 \ + --advertise-client-urls http://${HOSTNAME}.${SUBDOMAIN}:2379 \ + --initial-advertise-peer-urls http://${HOSTNAME}.${SUBDOMAIN}:2380 \ + --initial-cluster-token etcd-cluster-1 \ + --initial-cluster ${PEERS} \ + --initial-cluster-state new \ + --data-dir /var/run/etcd/default.etcd \ + --auto-compaction-mode=revision \ + --auto-compaction-retention=1`, + }, + } +} + +// ports creates a service port resource definition for the ETCD service. +func (r *ETCDDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: etcdClientPort, Name: "client", Protocol: "TCP"}, + {Port: etcdPeerPort, Name: "peer", Protocol: "TCP"}, + } +} + +// clientPorts creates a service port resource definition for the ETCD headless service. +func (r *ETCDDeployment) clientPorts() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: etcdClientPort, Name: "etcd-client", Protocol: "TCP"}, + } +} diff --git a/internal/etos/etos.go b/internal/etos/etos.go new file mode 100644 index 00000000..a16e5d38 --- /dev/null +++ b/internal/etos/etos.go @@ -0,0 +1,457 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etos + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + etosapi "github.com/eiffel-community/etos/internal/etos/api" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ETOSDeployment struct { + etosv1alpha1.ETOS + client.Client + Scheme *runtime.Scheme + rabbitmqSecret string + messagebusSecret string +} + +// NewETOSDeployment will create a new ETOSDeployment reconciler. +func NewETOSDeployment(spec etosv1alpha1.ETOS, scheme *runtime.Scheme, client client.Client, rabbitmqSecret string, messagebusSecret string) *ETOSDeployment { + return &ETOSDeployment{spec, client, scheme, rabbitmqSecret, messagebusSecret} +} + +// Reconcile will reconcile ETOS to its expected state. +func (r *ETOSDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + var err error + namespacedName := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} + if _, err := r.reconcileIngress(ctx, namespacedName, cluster); err != nil { + return err + } + + _, err = r.reconcileRole(ctx, namespacedName, cluster) + if err != nil { + return err + } + + _, err = r.reconcileServiceAccount(ctx, namespacedName, cluster) + if err != nil { + return err + } + + _, err = r.reconcileRolebinding(ctx, namespacedName, cluster) + if err != nil { + return err + } + + configmap, err := r.reconcileConfigmap(ctx, namespacedName, cluster) + if err != nil { + return err + } + + _, err = r.reconcileSecret(ctx, namespacedName, cluster) + if err != nil { + return err + } + + api := etosapi.NewETOSApiDeployment(r.API, r.Scheme, r.Client, r.rabbitmqSecret, r.messagebusSecret, configmap.Name) + if err := api.Reconcile(ctx, cluster); err != nil { + return err + } + + sse := etosapi.NewETOSSSEDeployment(r.SSE, r.Scheme, r.Client) + if err := sse.Reconcile(ctx, cluster); err != nil { + return err + } + + logarea := etosapi.NewETOSLogAreaDeployment(r.LogArea, r.Scheme, r.Client) + if err := logarea.Reconcile(ctx, cluster); err != nil { + return err + } + + return nil +} + +// reconcileIngress will reconcile the ETOS ingress to its expected state. 
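+// The ingress is created only while Ingress.Enabled is true, deleted again when the
+// flag is turned off, and otherwise patched whenever the rendered spec has drifted
+// from the object currently in the cluster.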
+func (r *ETOSDeployment) reconcileIngress(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*networkingv1.Ingress, error) { + target := r.ingress(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + ingress := &networkingv1.Ingress{} + if err := r.Get(ctx, name, ingress); err != nil { + if !apierrors.IsNotFound(err) { + return ingress, err + } + if r.Ingress.Enabled { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Ingress.Enabled { + return nil, r.Delete(ctx, ingress) + } + if equality.Semantic.DeepDerivative(target.Spec, ingress.Spec) { + return ingress, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(ingress)) +} + +// reconcileRole will reconcile the ETOS API service account role to its expected state. +func (r *ETOSDeployment) reconcileRole(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*rbacv1.Role, error) { + name.Name = fmt.Sprintf("%s-provider", name.Name) + + labelName := name.Name + name.Name = fmt.Sprintf("%s:sa:environment-provider", name.Name) + + target := r.role(name, labelName) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + role := &rbacv1.Role{} + if err := r.Get(ctx, name, role); err != nil { + if !apierrors.IsNotFound(err) { + return role, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(role)) +} + +// reconcileServiceAccount will reconcile the ETOS API service account to its expected state. +func (r *ETOSDeployment) reconcileServiceAccount(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.ServiceAccount, error) { + name.Name = fmt.Sprintf("%s-provider", name.Name) + + target := r.serviceaccount(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + serviceaccount := &corev1.ServiceAccount{} + if err := r.Get(ctx, name, serviceaccount); err != nil { + if !apierrors.IsNotFound(err) { + return serviceaccount, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(serviceaccount)) +} + +// reconcileRolebinding will reconcile the ETOS API service account role binding to its expected state. +func (r *ETOSDeployment) reconcileRolebinding(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*rbacv1.RoleBinding, error) { + name.Name = fmt.Sprintf("%s-provider", name.Name) + + target := r.rolebinding(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + rolebinding := &rbacv1.RoleBinding{} + if err := r.Get(ctx, name, rolebinding); err != nil { + if !apierrors.IsNotFound(err) { + return rolebinding, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(rolebinding)) +} + +// reconcileConfigmap will reconcile the ETOS configmap to its expected state. 
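+// A missing configmap is created; an existing one is patched with a strategic merge
+// against the currently deployed object.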
+func (r *ETOSDeployment) reconcileConfigmap(ctx context.Context, name types.NamespacedName, cluster *etosv1alpha1.Cluster) (*corev1.ConfigMap, error) { + target := r.configmap(name, cluster) + if err := ctrl.SetControllerReference(cluster, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + configmap := &corev1.ConfigMap{} + if err := r.Get(ctx, name, configmap); err != nil { + if !apierrors.IsNotFound(err) { + return configmap, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(configmap)) +} + +// reconcileSecret will reconcile the secret to its expected state. +func (r *ETOSDeployment) reconcileSecret(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Secret, error) { + target, err := r.secret(ctx, name) + if err != nil { + return target, err + } + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + secret := &corev1.Secret{} + if err := r.Get(ctx, name, secret); err != nil { + if !apierrors.IsNotFound(err) { + return secret, err + } + if err := r.Create(ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Data, secret.Data) { + return secret, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(secret)) +} + +// ingress creates an ingress resource definition for ETOS. +func (r *ETOSDeployment) ingress(name types.NamespacedName) *networkingv1.Ingress { + ingress := &networkingv1.Ingress{ + ObjectMeta: r.meta(name), + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{r.ingressRule(name)}, + }, + } + if r.Ingress.IngressClass != "" { + ingress.Spec.IngressClassName = &r.Ingress.IngressClass + } + return ingress +} + +// configmap creates a configmap definition for ETOS. 
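+// ETOS_API defaults to "http://<ingress host>/api" and ETOS_GRAPHQL_SERVER to the
+// cluster's event repository host; both can be overridden explicitly through
+// Config.ETOSApiURL and Config.ETOSEventRepositoryURL.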
+func (r *ETOSDeployment) configmap(name types.NamespacedName, cluster *etosv1alpha1.Cluster) *corev1.ConfigMap { + etosHost := name.Name + if r.Ingress.Host != "" { + etosHost = r.Ingress.Host + } + etosApi := fmt.Sprintf("http://%s/api", etosHost) + if r.Config.ETOSApiURL != "" { + etosApi = r.Config.ETOSApiURL + } + eventRepository := cluster.Spec.EventRepository.Host + if r.Config.ETOSEventRepositoryURL != "" { + eventRepository = r.Config.ETOSEventRepositoryURL + } + + data := map[string]string{ + "ETOS_GRAPHQL_SERVER": eventRepository, + "ETOS_CLUSTER": cluster.Name, + "ETOS_NAMESPACE": cluster.Namespace, + "ENVIRONMENT_PROVIDER_SERVICE_ACCOUNT": fmt.Sprintf("%s-provider", cluster.Name), + "SOURCE_HOST": r.Config.Source, + "ETOS_API": etosApi, + "SUITE_RUNNER_IMAGE": cluster.Spec.ETOS.SuiteRunner.Image.Image, + "SUITE_RUNNER_IMAGE_PULL_POLICY": string(cluster.Spec.ETOS.SuiteRunner.ImagePullPolicy), + "LOG_LISTENER_IMAGE": cluster.Spec.ETOS.SuiteRunner.LogListener.Image, + "LOG_LISTENER_IMAGE_PULL_POLICY": string(cluster.Spec.ETOS.SuiteRunner.LogListener.ImagePullPolicy), + "ENVIRONMENT_PROVIDER_IMAGE": cluster.Spec.ETOS.EnvironmentProvider.Image.Image, + "ENVIRONMENT_PROVIDER_IMAGE_PULL_POLICY": string(cluster.Spec.ETOS.EnvironmentProvider.ImagePullPolicy), + "ETR_VERSION": cluster.Spec.ETOS.TestRunner.Version, + "ETOS_ROUTING_KEY_TAG": cluster.Spec.ETOS.Config.RoutingKeyTag, + + "ETOS_ETCD_HOST": cluster.Spec.Database.Etcd.Host, + "ETOS_ETCD_PORT": cluster.Spec.Database.Etcd.Port, + + "DEV": r.Config.Dev, + + // TODO: A few of these seem redundant + "ESR_WAIT_FOR_ENVIRONMENT_TIMEOUT": r.Config.EnvironmentTimeout, + "ETOS_WAIT_FOR_IUT_TIMEOUT": r.Config.EnvironmentTimeout, + "ETOS_EVENT_DATA_TIMEOUT": r.Config.EventDataTimeout, + "ENVIRONMENT_PROVIDER_EVENT_DATA_TIMEOUT": r.Config.EventDataTimeout, + "ENVIRONMENT_PROVIDER_TEST_SUITE_TIMEOUT": r.Config.TestSuiteTimeout, + "ETOS_TEST_SUITE_TIMEOUT": r.Config.TestSuiteTimeout, + } + if cluster.Spec.ETOS.Config.TestRunRetention.Failure != nil { + data["TESTRUN_FAILURE_RETENTION"] = cluster.Spec.ETOS.Config.TestRunRetention.Failure.Duration.String() + } + if cluster.Spec.ETOS.Config.TestRunRetention.Success != nil { + data["TESTRUN_SUCCESS_RETENTION"] = cluster.Spec.ETOS.Config.TestRunRetention.Success.Duration.String() + } + if r.Config.Timezone != "" { + data["TZ"] = r.Config.Timezone + } + return &corev1.ConfigMap{ + ObjectMeta: r.meta(name), + Data: data, + } +} + +// secret creates a secret definition for ETOS. +func (r *ETOSDeployment) secret(ctx context.Context, name types.NamespacedName) (*corev1.Secret, error) { + value, err := r.Config.EncryptionKey.Get(ctx, r.Client, name.Namespace) + if err != nil { + return nil, err + } + return &corev1.Secret{ + ObjectMeta: r.meta(name), + Data: map[string][]byte{ + "ETOS_ENCRYPTION_KEY": value, + }, + }, nil +} + +// meta creates a common meta object for kubernetes resources. +func (r *ETOSDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// ingressRule creates the ingress rules for ETOS. +func (r *ETOSDeployment) ingressRule(name types.NamespacedName) networkingv1.IngressRule { + // TODO: Hard-coded names. 
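+	// The backend names below assume the "<cluster>-etos-api", "<cluster>-etos-sse" and
+	// "<cluster>-etos-logarea" services created by the API, SSE and log area deployments.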
+ prefix := networkingv1.PathTypePrefix + ingressRule := networkingv1.IngressRule{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/api", + PathType: &prefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: fmt.Sprintf("%s-etos-api", name.Name), + Port: networkingv1.ServiceBackendPort{ + Number: etosapi.ApiServicePort, + }, + }, + }, + }, + { + Path: "/sse", + PathType: &prefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: fmt.Sprintf("%s-etos-sse", name.Name), + Port: networkingv1.ServiceBackendPort{ + Number: etosapi.SSEServicePort, + }, + }, + }, + }, + { + Path: "/logarea", + PathType: &prefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: fmt.Sprintf("%s-etos-logarea", name.Name), + Port: networkingv1.ServiceBackendPort{ + Number: etosapi.LogAreaServicePort, + }, + }, + }, + }, + }, + }, + }, + } + if r.Ingress.Host != "" { + ingressRule.Host = r.Ingress.Host + } + return ingressRule +} + +// role creates a role resource definition for the ETOS API. +func (r *ETOSDeployment) role(name types.NamespacedName, labelName string) *rbacv1.Role { + meta := r.meta(types.NamespacedName{Name: labelName, Namespace: name.Namespace}) + meta.Name = name.Name + meta.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "true" + return &rbacv1.Role{ + ObjectMeta: meta, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "etos.eiffel-community.github.io", + }, + Resources: []string{ + "testruns", + "providers", + "environmentrequests", + }, + Verbs: []string{ + "get", "list", "watch", + }, + }, + { + APIGroups: []string{"etos.eiffel-community.github.io"}, + Resources: []string{ + "environments", + }, + Verbs: []string{ + "create", "get", "list", "watch", "delete", + }, + }, + }, + } +} + +// serviceaccount creates a service account resource definition for the ETOS API. +func (r *ETOSDeployment) serviceaccount(name types.NamespacedName) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: r.meta(name), + } +} + +// rolebinding creates a rolebinding resource definition for the ETOS API. +func (r *ETOSDeployment) rolebinding(name types.NamespacedName) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: r.meta(name), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.SchemeGroupVersion.Group, + Kind: "Role", + Name: fmt.Sprintf("%s:sa:environment-provider", name.Name), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: name.Name, + }, + }, + } +} diff --git a/internal/extras/eventrepository.go b/internal/extras/eventrepository.go new file mode 100644 index 00000000..1884ac0d --- /dev/null +++ b/internal/extras/eventrepository.go @@ -0,0 +1,299 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +package extras + +import ( + "context" + "fmt" + "net/url" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var graphqlPort int32 = 5000 + +type EventRepositoryDeployment struct { + *etosv1alpha1.EventRepository + client.Client + Scheme *runtime.Scheme + mongoUri url.URL + rabbitmqSecret string + mongodbSecret string +} + +// NewEventRepositoryDeployment will create a new event repository reconciler. +func NewEventRepositoryDeployment(spec *etosv1alpha1.EventRepository, scheme *runtime.Scheme, client client.Client, mongodb *MongoDBDeployment, rabbitmqSecret string) *EventRepositoryDeployment { + return &EventRepositoryDeployment{spec, client, scheme, mongodb.URL, rabbitmqSecret, mongodb.SecretName} +} + +// Reconcile will reconcile the event repository to its expected state. +func (r *EventRepositoryDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + name := fmt.Sprintf("%s-graphql", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + + _, err := r.reconcileDeployment(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileIngress(ctx, namespacedName, cluster) + if err != nil { + return err + } + if r.Ingress.Enabled { + host := namespacedName.Name + if r.Ingress.Host != "" { + host = r.Ingress.Host + } + r.Host = fmt.Sprintf("http://%s/graphql", host) + } + return nil +} + +// reconcileDeployment will reconcile the event repository deployment to its expected state. +func (r *EventRepositoryDeployment) reconcileDeployment(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*appsv1.Deployment, error) { + target := r.deployment(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + deployment := &appsv1.Deployment{} + if err := r.Get(ctx, name, deployment); err != nil { + if !apierrors.IsNotFound(err) { + return deployment, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, deployment) + } + if equality.Semantic.DeepDerivative(target.Spec, deployment.Spec) { + return deployment, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(deployment)) +} + +// reconcileService will reconcile the event repository service to its expected state. 
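+// Like the deployment, the service only exists while Deploy is true and is removed
+// again when the event repository deployment is disabled.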
+func (r *EventRepositoryDeployment) reconcileService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, service) + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// reconcileIngress will reconcile the event repository ingress to its expected state. +func (r *EventRepositoryDeployment) reconcileIngress(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*networkingv1.Ingress, error) { + target := r.ingress(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + ingress := &networkingv1.Ingress{} + if err := r.Get(ctx, name, ingress); err != nil { + if !apierrors.IsNotFound(err) { + return ingress, err + } + if r.Ingress.Enabled { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Ingress.Enabled { + return nil, r.Delete(ctx, ingress) + } + + if equality.Semantic.DeepDerivative(target.Spec, ingress.Spec) { + return ingress, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(ingress)) +} + +// deployment will create a deployment resource definition for the event repository. +func (r *EventRepositoryDeployment) deployment(name types.NamespacedName) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: r.meta(name), + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + Containers: r.containers(name), + }, + }, + }, + } +} + +// service will create a service resource definition for the event repository. +func (r *EventRepositoryDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + } +} + +// ingress will create a ingress resource definition for the event repository. +func (r *EventRepositoryDeployment) ingress(name types.NamespacedName) *networkingv1.Ingress { + ingress := &networkingv1.Ingress{ + ObjectMeta: r.meta(name), + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{r.ingressRule(name)}, + }, + } + if r.Ingress.IngressClass != "" { + ingress.Spec.IngressClassName = &r.Ingress.IngressClass + } + return ingress +} + +// meta will create a common meta resource object for the event repository. +func (r *EventRepositoryDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// containers will create a container resource definition for the event repository deployment. 
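+// The pod runs two containers: the GraphQL API itself and a storage worker started
+// with "python3 -m eiffel_graphql_api.storage". Both read their configuration from
+// the MongoDB and RabbitMQ secrets via environment().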
+func (r *EventRepositoryDeployment) containers(name types.NamespacedName) []corev1.Container { + return []corev1.Container{ + { + Name: fmt.Sprintf("%s-api", name.Name), + Image: r.API.Image, + ImagePullPolicy: r.API.ImagePullPolicy, + Ports: []corev1.ContainerPort{ + { + Name: "amqp", + ContainerPort: graphqlPort, + Protocol: "TCP", + }, + }, + EnvFrom: r.environment(), + }, { + Name: fmt.Sprintf("%s-storage", name.Name), + Image: r.Storage.Image, + ImagePullPolicy: r.Storage.ImagePullPolicy, + Command: []string{ + "python3", + "-m", + "eiffel_graphql_api.storage", + }, + EnvFrom: r.environment(), + }, + } +} + +// environment will create an environment resource definition for the event repository deployment. +func (r *EventRepositoryDeployment) environment() []corev1.EnvFromSource { + return []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: r.mongodbSecret, + }, + }, + }, + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: r.rabbitmqSecret, + }, + }, + }, + } +} + +// ingressRule will create an ingress rule resource definition for the event repository ingress. +func (r *EventRepositoryDeployment) ingressRule(name types.NamespacedName) networkingv1.IngressRule { + prefix := networkingv1.PathTypePrefix + ingressRule := networkingv1.IngressRule{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/graphql", + PathType: &prefix, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: name.Name, + Port: networkingv1.ServiceBackendPort{ + Number: graphqlPort, + }, + }, + }, + }, + }, + }, + }, + } + if r.Ingress.Host != "" { + ingressRule.Host = r.Ingress.Host + } + return ingressRule +} + +// ports will create a service port resource definition for the event repository service. +func (r *EventRepositoryDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: graphqlPort, Name: "amqp", Protocol: "TCP"}, + } +} diff --git a/internal/extras/messagebus.go b/internal/extras/messagebus.go new file mode 100644 index 00000000..12809f8c --- /dev/null +++ b/internal/extras/messagebus.go @@ -0,0 +1,294 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package extras + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type MessageBusDeployment struct { + etosv1alpha1.RabbitMQ + client.Client + ctx context.Context + Scheme *runtime.Scheme + SecretName string +} + +// NewMessageBusDeployment will create a new messagebus reconciler. +func NewMessageBusDeployment(spec etosv1alpha1.RabbitMQ, scheme *runtime.Scheme, client client.Client) *MessageBusDeployment { + return &MessageBusDeployment{spec, client, nil, scheme, ""} +} + +// Reconcile will reconcile the messagebus to its expected state. +func (r *MessageBusDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + logger := log.FromContext(ctx) + r.ctx = ctx + name := fmt.Sprintf("%s-messagebus", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + if r.Deploy { + logger.Info("Patching host & port when deploying RabbitMQ", "host", name, "port", rabbitmqPort) + r.Host = name + r.Port = fmt.Sprintf("%d", rabbitmqPort) + } + + secret, err := r.reconcileSecret(namespacedName, cluster) + if err != nil { + return err + } + r.SecretName = secret.Name + + _, err = r.reconcileStatefulset(namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(namespacedName, cluster) + if err != nil { + return err + } + return nil +} + +// reconcileSecret will reconcile the messagebus secret to its expected state. +func (r *MessageBusDeployment) reconcileSecret(name types.NamespacedName, owner metav1.Object) (*corev1.Secret, error) { + target, err := r.secret(name) + if err != nil { + return target, err + } + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + secret := &corev1.Secret{} + if err := r.Get(r.ctx, name, secret); err != nil { + if !apierrors.IsNotFound(err) { + return secret, err + } + if err := r.Create(r.ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Data, secret.Data) { + return secret, nil + } + return target, r.Patch(r.ctx, target, client.StrategicMergeFrom(secret)) +} + +// reconcileStatefulset will reconcile the messagebus statefulset to its expected state. +func (r *MessageBusDeployment) reconcileStatefulset(name types.NamespacedName, owner metav1.Object) (*appsv1.StatefulSet, error) { + target := r.statefulset(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + rabbitmq := &appsv1.StatefulSet{} + if err := r.Get(r.ctx, name, rabbitmq); err != nil { + if !apierrors.IsNotFound(err) { + return rabbitmq, err + } + if r.Deploy { + if err := r.Create(r.ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(r.ctx, rabbitmq) + } + return target, r.Patch(r.ctx, target, client.StrategicMergeFrom(rabbitmq)) +} + +// reconcileService will reconcile the messagebus service to its expected state. 
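+// The service follows the same deploy/delete/patch pattern as the statefulset, so the
+// Host patched in Reconcile ("<cluster>-messagebus") resolves to this service inside
+// the namespace.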
+func (r *MessageBusDeployment) reconcileService(name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(r.ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if r.Deploy { + if err := r.Create(r.ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(r.ctx, service) + } + return target, r.Patch(r.ctx, target, client.StrategicMergeFrom(service)) +} + +// secret will create a secret resource definition for the messagebus. +func (r *MessageBusDeployment) secret(name types.NamespacedName) (*corev1.Secret, error) { + data, err := r.secretData(name.Namespace) + if err != nil { + return nil, err + } + return &corev1.Secret{ + ObjectMeta: r.meta(name), + Data: data, + }, nil +} + +// statefulset will create a statefulset resource definition for the messagebus. +func (r *MessageBusDeployment) statefulset(name types.NamespacedName) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: r.meta(name), + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{r.volumeClaim(name)}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{r.volume(name)}, + Containers: []corev1.Container{r.container(name)}, + }, + }, + }, + } +} + +// service will create a service resource definition for the messagebus. +func (r *MessageBusDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + } +} + +// secretData will create a map of secrets for the messagebus secret. +func (r *MessageBusDeployment) secretData(namespace string) (map[string][]byte, error) { + data := map[string][]byte{ + "ETOS_RABBITMQ_HOST": []byte(r.Host), + "ETOS_RABBITMQ_EXCHANGE": []byte(r.Exchange), + "ETOS_RABBITMQ_PORT": []byte(r.Port), + "ETOS_RABBITMQ_SSL": []byte(r.SSL), + "ETOS_RABBITMQ_VHOST": []byte(r.Vhost), + } + + if r.Password != nil { + password, err := r.Password.Get(r.ctx, r.Client, namespace) + if err != nil { + return nil, err + } + data["ETOS_RABBITMQ_PASSWORD"] = password + } + if r.Username != "" { + data["ETOS_RABBITMQ_USERNAME"] = []byte(r.Username) + } + if r.QueueName != "" { + data["ETOS_RABBITMQ_QUEUE_NAME"] = []byte(r.QueueName) + } + if r.QueueParams != "" { + data["ETOS_RABBITMQ_QUEUE_PARAMS"] = []byte(r.QueueParams) + } + return data, nil +} + +// meta will create a common meta resource object for the messagebus. +func (r *MessageBusDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// volumeClaim will create a volume claim resource definition for the messagebus statefulset. 
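+// The claim requests 1Gi of ReadWriteOnce storage, mirroring the claims used by the
+// other stateful deployments in this package.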
+func (r *MessageBusDeployment) volumeClaim(name types.NamespacedName) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-data", name.Name), + Namespace: name.Namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")}, + }, + }, + } +} + +// volume will create a volume resource definition for the messagebus statefulset. +func (r *MessageBusDeployment) volume(name types.NamespacedName) corev1.Volume { + return corev1.Volume{ + Name: fmt.Sprintf("%s-data", name.Name), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("%s-data", name.Name), + }, + }, + } +} + +// container will create a container resource definition for the messagebus statefulset. +func (r *MessageBusDeployment) container(name types.NamespacedName) corev1.Container { + return corev1.Container{ + Name: name.Name, + Image: "rabbitmq:latest", + VolumeMounts: []corev1.VolumeMount{ + { + Name: fmt.Sprintf("%s-data", name.Name), + MountPath: "/var/lib/rabbitmq/data", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "amqp", + ContainerPort: rabbitmqPort, + Protocol: "TCP", + }, + }, + } +} + +// ports will create a ports resource definition for the messagebus service. +func (r *MessageBusDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: rabbitmqPort, Name: "amqp", Protocol: "TCP"}, + } +} diff --git a/internal/extras/mongodb.go b/internal/extras/mongodb.go new file mode 100644 index 00000000..8aab481e --- /dev/null +++ b/internal/extras/mongodb.go @@ -0,0 +1,303 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package extras + +import ( + "context" + "fmt" + "net/url" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var mongodbPort int32 = 27017 + +type MongoDBDeployment struct { + etosv1alpha1.MongoDB + client.Client + Scheme *runtime.Scheme + URL url.URL + SecretName string +} + +// NewMongoDBDeployment will create a new MongoDB reconciler. 
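+// The URL field starts out empty and is populated from the MongoDB URI the first time
+// Reconcile runs.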
+func NewMongoDBDeployment(spec etosv1alpha1.MongoDB, scheme *runtime.Scheme, client client.Client) *MongoDBDeployment { + return &MongoDBDeployment{spec, client, scheme, url.URL{}, ""} +} + +// Reconcile will reconcile MongoDB to its expected state. +func (r *MongoDBDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + logger := log.FromContext(ctx) + name := fmt.Sprintf("%s-mongodb", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + + if (url.URL{}) == r.URL { + uri, err := r.URI.Get(ctx, r.Client, cluster.Namespace) + if err != nil { + return err + } + mongodbURL, err := url.Parse(string(uri)) + if err != nil { + return err + } + r.URL = *mongodbURL + } + + if r.Deploy { + logger.Info("Patching host & port when deploying mongodb", "host", name, "port", mongodbPort) + r.URL.Host = fmt.Sprintf("%s:%d", name, mongodbPort) + r.URI.Value = r.URL.String() + } + secret, err := r.reconcileSecret(ctx, namespacedName, cluster) + if err != nil { + return err + } + r.SecretName = secret.Name + + _, err = r.reconcileStatefulset(ctx, namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(ctx, namespacedName, cluster) + if err != nil { + return err + } + + return nil +} + +// reconcileSecret will reconcile the MongoDB secret to its expected state. +func (r *MongoDBDeployment) reconcileSecret(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Secret, error) { + logger := log.FromContext(ctx) + target := r.secret(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + secret := &corev1.Secret{} + if err := r.Get(ctx, name, secret); err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "failed to get mongodb secret") + return secret, err + } + logger.Info("Secret not found. Creating") + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, secret) + } + if equality.Semantic.DeepDerivative(target.Data, secret.Data) { + return secret, nil + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(secret)) +} + +// reconcileStatefulset will reconcile the MongoDB statefulset to its expected state. +func (r *MongoDBDeployment) reconcileStatefulset(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*appsv1.StatefulSet, error) { + target := r.statefulset(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + mongodb := &appsv1.StatefulSet{} + if err := r.Get(ctx, name, mongodb); err != nil { + if !apierrors.IsNotFound(err) { + return mongodb, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return mongodb, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, mongodb) + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(mongodb)) +} + +// reconcileService will reconcile the MongoDB service to its expected state. 
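+// The service follows the same deploy/delete/patch pattern as the secret and
+// statefulset above.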
+func (r *MongoDBDeployment) reconcileService(ctx context.Context, name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if r.Deploy { + if err := r.Create(ctx, target); err != nil { + return target, err + } + } + return service, nil + } else if !r.Deploy { + return nil, r.Delete(ctx, service) + } + return target, r.Patch(ctx, target, client.StrategicMergeFrom(service)) +} + +// secret will create a secret resource definition for MongoDB. +func (r *MongoDBDeployment) secret(name types.NamespacedName) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: r.meta(name), + Data: r.secretData(), + } +} + +// statefulset will create a statefulset resource definition for MongoDB. +func (r *MongoDBDeployment) statefulset(name types.NamespacedName) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: r.meta(name), + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{r.volumeClaim(name)}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{r.volume(name)}, + Containers: []corev1.Container{r.container(name)}, + }, + }, + }, + } +} + +// service will create a service resource definition for MongoDB. +func (r *MongoDBDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + } +} + +// secretData will create a map of secret data for the MongoDB secret. +func (r *MongoDBDeployment) secretData() map[string][]byte { + return map[string][]byte{ + "MONGODB_CONNSTRING": []byte(r.URL.String()), + "MONGODB_DATABASE": []byte(r.URL.Path[1:]), // Path always start with '/' + } +} + +// meta will create a common meta object for MongoDB. +func (r *MongoDBDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// volumeClaim will create a volume claim resource definition for the MongoDB statefulset. +func (r *MongoDBDeployment) volumeClaim(name types.NamespacedName) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-data", name.Name), + Namespace: name.Namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")}, + }, + }, + } +} + +// volume will create a volume resource definition for the MongoDB statefulset. 
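+// The claim name matches the "%s-data" template rendered by volumeClaim, e.g.
+// "cluster-sample-mongodb-data" for a cluster named "cluster-sample".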
+func (r *MongoDBDeployment) volume(name types.NamespacedName) corev1.Volume { + return corev1.Volume{ + Name: fmt.Sprintf("%s-data", name.Name), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("%s-data", name.Name), + }, + }, + } +} + +// container will create a container resource definition for the MongoDB statefulset. +func (r *MongoDBDeployment) container(name types.NamespacedName) corev1.Container { + password, _ := r.URL.User.Password() + return corev1.Container{ + Name: name.Name, + Image: "mongo:latest", + VolumeMounts: []corev1.VolumeMount{ + { + Name: fmt.Sprintf("%s-data", name.Name), + MountPath: "/data/db", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "mongo", + ContainerPort: mongodbPort, + Protocol: "TCP", + }, + }, + Env: []corev1.EnvVar{ + { + Name: "MONGO_INITDB_DATABASE", + Value: r.URL.Path[1:], // Path always start with '/' + }, + { + Name: "MONGO_INITDB_ROOT_USERNAME", + Value: r.URL.User.Username(), + }, + { + Name: "MONGO_INITDB_ROOT_PASSWORD", + Value: password, + }, + }, + } +} + +// ports will create a service port resource definition for the MongoDB service. +func (r *MongoDBDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: mongodbPort, Name: "mongo", Protocol: "TCP"}, + } +} diff --git a/internal/extras/rabbitmq.go b/internal/extras/rabbitmq.go new file mode 100644 index 00000000..4b94c480 --- /dev/null +++ b/internal/extras/rabbitmq.go @@ -0,0 +1,299 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package extras + +import ( + "context" + "fmt" + + etosv1alpha1 "github.com/eiffel-community/etos/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var rabbitmqPort int32 = 5672 + +type RabbitMQDeployment struct { + etosv1alpha1.RabbitMQ + client.Client + ctx context.Context + Scheme *runtime.Scheme + SecretName string +} + +// NewRabbitMQDeployment will create a new RabbitMQ reconciler. +func NewRabbitMQDeployment(spec etosv1alpha1.RabbitMQ, scheme *runtime.Scheme, client client.Client) *RabbitMQDeployment { + return &RabbitMQDeployment{spec, client, nil, scheme, ""} +} + +// Reconcile will reconcile RabbitMQ to its expected state. 
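+// When Deploy is true, Host and Port are patched to point at the in-cluster service
+// ("<cluster>-rabbitmq" on rabbitmqPort) before the secret is rendered, so the
+// generated RABBITMQ_* values always match what is actually deployed.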
+func (r *RabbitMQDeployment) Reconcile(ctx context.Context, cluster *etosv1alpha1.Cluster) error { + logger := log.FromContext(ctx) + r.ctx = ctx + name := fmt.Sprintf("%s-rabbitmq", cluster.Name) + namespacedName := types.NamespacedName{Name: name, Namespace: cluster.Namespace} + if r.Deploy { + logger.Info("Patching host & port when deploying RabbitMQ", "host", name, "port", rabbitmqPort) + r.Host = name + r.Port = fmt.Sprintf("%d", rabbitmqPort) + } + + secret, err := r.reconcileSecret(namespacedName, cluster) + if err != nil { + return err + } + r.SecretName = secret.Name + + _, err = r.reconcileStatefulset(namespacedName, cluster) + if err != nil { + return err + } + _, err = r.reconcileService(namespacedName, cluster) + if err != nil { + return err + } + + return nil +} + +// reconcileSecret will reconcile the RabbitMQ secret to its expected state. +func (r *RabbitMQDeployment) reconcileSecret(name types.NamespacedName, owner metav1.Object) (*corev1.Secret, error) { + logger := log.FromContext(r.ctx) + target, err := r.secret(name) + if err != nil { + return target, err + } + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + scheme.Scheme.Default(target) + + secret := &corev1.Secret{} + if err := r.Get(r.ctx, name, secret); err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "failed to get rabbitmq secret") + return secret, err + } + logger.Info("Secret not found. Creating") + if err := r.Create(r.ctx, target); err != nil { + return target, err + } + return target, nil + } + if equality.Semantic.DeepDerivative(target.Data, secret.Data) { + return secret, nil + } + return target, r.Patch(r.ctx, target, client.StrategicMergeFrom(secret)) +} + +// reconcileStatefulset will reconcile the RabbitMQ statefulset to its expected state. +func (r *RabbitMQDeployment) reconcileStatefulset(name types.NamespacedName, owner metav1.Object) (*appsv1.StatefulSet, error) { + target := r.statefulset(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + rabbitmq := &appsv1.StatefulSet{} + if err := r.Get(r.ctx, name, rabbitmq); err != nil { + if !apierrors.IsNotFound(err) { + return rabbitmq, err + } + if r.Deploy { + if err := r.Create(r.ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(r.ctx, rabbitmq) + } + return target, r.Patch(r.ctx, target, client.StrategicMergeFrom(rabbitmq)) +} + +// reconcileService will reconcile the RabbitMQ service to its expected state. +func (r *RabbitMQDeployment) reconcileService(name types.NamespacedName, owner metav1.Object) (*corev1.Service, error) { + target := r.service(name) + if err := ctrl.SetControllerReference(owner, target, r.Scheme); err != nil { + return target, err + } + + service := &corev1.Service{} + if err := r.Get(r.ctx, name, service); err != nil { + if !apierrors.IsNotFound(err) { + return service, err + } + if r.Deploy { + if err := r.Create(r.ctx, target); err != nil { + return target, err + } + } + return target, nil + } else if !r.Deploy { + return nil, r.Delete(r.ctx, service) + } + return target, r.Patch(r.ctx, target, client.StrategicMergeFrom(service)) +} + +// secret will create a secret resource definition for RabbitMQ. 
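+// The data always contains RABBITMQ_HOST, RABBITMQ_EXCHANGE, RABBITMQ_PORT,
+// RABBITMQ_SSL and RABBITMQ_VHOST; password, username and queue settings are added by
+// secretData only when they are set in the spec.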
+func (r *RabbitMQDeployment) secret(name types.NamespacedName) (*corev1.Secret, error) { + data, err := r.secretData(name) + if err != nil { + return nil, err + } + return &corev1.Secret{ + ObjectMeta: r.meta(name), + Data: data, + }, nil +} + +// statefulset will create a statefulset resource definition for RabbitMQ. +func (r *RabbitMQDeployment) statefulset(name types.NamespacedName) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: r.meta(name), + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{r.volumeClaim(name)}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: r.meta(name), + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{r.volume(name)}, + Containers: []corev1.Container{r.container(name)}, + }, + }, + }, + } +} + +// service will create a service resource definition for RabbitMQ. +func (r *RabbitMQDeployment) service(name types.NamespacedName) *corev1.Service { + return &corev1.Service{ + ObjectMeta: r.meta(name), + Spec: corev1.ServiceSpec{ + Ports: r.ports(), + Selector: map[string]string{"app.kubernetes.io/name": name.Name}, + }, + } +} + +// secretData will create a map of secrets for the RabbitMQ secret. +func (r *RabbitMQDeployment) secretData(name types.NamespacedName) (map[string][]byte, error) { + data := map[string][]byte{ + "RABBITMQ_HOST": []byte(r.Host), + "RABBITMQ_EXCHANGE": []byte(r.Exchange), + "RABBITMQ_PORT": []byte(r.Port), + "RABBITMQ_SSL": []byte(r.SSL), + "RABBITMQ_VHOST": []byte(r.Vhost), + } + if r.Password != nil { + password, err := r.Password.Get(r.ctx, r.Client, name.Namespace) + if err != nil { + return nil, err + } + data["RABBITMQ_PASSWORD"] = password + } + if r.Username != "" { + data["RABBITMQ_USERNAME"] = []byte(r.Username) + } + if r.QueueName != "" { + data["RABBITMQ_QUEUE"] = []byte(r.QueueName) + } + if r.QueueParams != "" { + data["RABBITMQ_QUEUE_PARAMS"] = []byte(r.QueueParams) + } + return data, nil +} + +// meta will create a common meta object for RabbitMQ. +func (r *RabbitMQDeployment) meta(name types.NamespacedName) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": name.Name, + }, + Annotations: make(map[string]string), + Name: name.Name, + Namespace: name.Namespace, + } +} + +// volumeClaim will create a volume claim resource definition for the RabbitMQ statefulset. +func (r *RabbitMQDeployment) volumeClaim(name types.NamespacedName) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-data", name.Name), + Namespace: name.Namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")}, + }, + }, + } +} + +// volume will create a volume resource definition for the RabbitMQ statefulset. +func (r *RabbitMQDeployment) volume(name types.NamespacedName) corev1.Volume { + return corev1.Volume{ + Name: fmt.Sprintf("%s-data", name.Name), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("%s-data", name.Name), + }, + }, + } +} + +// container will create a container resource definition for the RabbitMQ statefulset. 
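+// The container runs the stock "rabbitmq:latest" image with the data volume mounted
+// at /var/lib/rabbitmq/data and the AMQP port exposed.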
+func (r *RabbitMQDeployment) container(name types.NamespacedName) corev1.Container { + return corev1.Container{ + Name: name.Name, + Image: "rabbitmq:latest", + VolumeMounts: []corev1.VolumeMount{ + { + Name: fmt.Sprintf("%s-data", name.Name), + MountPath: "/var/lib/rabbitmq/data", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "amqp", + ContainerPort: rabbitmqPort, + Protocol: "TCP", + }, + }, + } +} + +// ports will create a service port resource definition for the RabbitMQ service. +func (r *RabbitMQDeployment) ports() []corev1.ServicePort { + return []corev1.ServicePort{ + {Port: rabbitmqPort, Name: "amqp", Protocol: "TCP"}, + } +} diff --git a/scripts/split_installer.py b/scripts/split_installer.py new file mode 100644 index 00000000..c0dfe370 --- /dev/null +++ b/scripts/split_installer.py @@ -0,0 +1,68 @@ +# Copyright Axis Communications AB. +# +# For a full list of individual contributors, please see the commit history. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +from typing import Iterator, Any +from pathlib import Path +from yaml import load_all, SafeLoader, dump_all, dump, SafeDumper + + +def split(installer: Iterator[Any]) -> tuple[list[dict], list[dict], dict]: + cluster = [] + namespaced = [] + namespace = None + for resource in installer: + if resource.get("kind") == "Namespace": + namespace = resource + continue + metadata = resource.get("metadata", {}) + if metadata.get("namespace") is None: + cluster.append(resource) + else: + namespaced.append(resource) + return cluster, namespaced, namespace + + +def run(installer: str): + path = Path(installer) + assert path.exists(), f"{installer} does not exist" + directory = path.parent + with path.open() as installer_file: + installer_generator = load_all(installer_file, Loader=SafeLoader) + cluster, namespaced, namespace = split(installer_generator) + print("=== CLUSTER ===") + for resource in cluster: + print(f"[{resource.get('kind')}]: {resource.get('metadata', {}).get('name')}") + with directory.joinpath("cluster.yaml").open("w") as cluster_file: + dump_all(cluster, cluster_file, Dumper=SafeDumper) + print("=== NAMESPACED ===") + for resource in namespaced: + print(f"[{resource.get('kind')}]: {resource.get('metadata', {}).get('namespace')}/{resource.get('metadata', {}).get('name')}") + with directory.joinpath("namespaced.yaml").open("w") as namespaced_file: + dump_all(namespaced, namespaced_file, Dumper=SafeDumper) + print("=== NAMESPACE ===") + print(f"[{namespace.get('kind')}]: {namespace.get('metadata', {}).get('name')}") + with directory.joinpath("namespace.yaml").open("w") as namespace_file: + dump(namespace, namespace_file, Dumper=SafeDumper) + print() + print("Successfully split the installer") + print("Files are located here:") + print(f" - {directory.joinpath('namespaced.yaml')}") + print(f" - {directory.joinpath('namespace.yaml')}") + print(f" - {directory.joinpath('cluster.yaml')}") + + +if __name__ == "__main__": + run(sys.argv[1]) diff --git a/requirements.txt b/source/requirements.txt 
similarity index 100% rename from requirements.txt rename to source/requirements.txt diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go new file mode 100644 index 00000000..fb9d2485 --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,32 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Run e2e tests using the Ginkgo runner. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + fmt.Fprintf(GinkgoWriter, "Starting etos suite\n") + RunSpecs(t, "e2e suite") +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go new file mode 100644 index 00000000..fade927d --- /dev/null +++ b/test/e2e/e2e_test.go @@ -0,0 +1,121 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "os/exec" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/eiffel-community/etos/test/utils" +) + +const namespace = "etos-system" + +var _ = Describe("controller", Ordered, func() { + BeforeAll(func() { + By("installing prometheus operator") + Expect(utils.InstallPrometheusOperator()).To(Succeed()) + + By("installing the cert-manager") + Expect(utils.InstallCertManager()).To(Succeed()) + + By("creating manager namespace") + cmd := exec.Command("kubectl", "create", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + AfterAll(func() { + By("uninstalling the Prometheus manager bundle") + utils.UninstallPrometheusOperator() + + By("uninstalling the cert-manager bundle") + utils.UninstallCertManager() + + By("removing manager namespace") + cmd := exec.Command("kubectl", "delete", "ns", namespace) + _, _ = utils.Run(cmd) + }) + + Context("Operator", func() { + It("should run successfully", func() { + var controllerPodName string + var err error + + // projectimage stores the name of the image used in the example + projectimage := "example.com/etos:v0.0.1" + + By("building the manager(Operator) image") + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the manager(Operator) image on Kind") + err = utils.LoadImageToKindClusterWithName(projectimage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("validating that the controller-manager pod is running as expected") + verifyControllerUp := func() error { + // Get pod name + + cmd = exec.Command("kubectl", "get", + "pods", "-l", "control-plane=controller-manager", + "-o", "go-template={{ range .items }}"+ + "{{ if not .metadata.deletionTimestamp }}"+ + "{{ .metadata.name }}"+ + "{{ \"\\n\" }}{{ end }}{{ end }}", + "-n", namespace, + ) + + podOutput, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + podNames := utils.GetNonEmptyLines(string(podOutput)) + if len(podNames) != 1 { + return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) + } + controllerPodName = podNames[0] + ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) + + // Validate pod status + cmd = exec.Command("kubectl", "get", + "pods", controllerPodName, "-o", "jsonpath={.status.phase}", + "-n", namespace, + ) + status, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) + if string(status) != "Running" { + return fmt.Errorf("controller pod in %s status", status) + } + return nil + } + EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + }) + }) +}) diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 00000000..d1918032 --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,140 @@ +// Copyright Axis Communications AB. +// +// For a full list of individual contributors, please see the commit history. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+
+	. "github.com/onsi/ginkgo/v2" //nolint:golint,revive
+)
+
+const (
+	prometheusOperatorVersion = "v0.72.0"
+	prometheusOperatorURL     = "https://github.com/prometheus-operator/prometheus-operator/" +
+		"releases/download/%s/bundle.yaml"
+
+	certmanagerVersion = "v1.14.4"
+	certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml"
+)
+
+func warnError(err error) {
+	fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
+}
+
+// InstallPrometheusOperator installs the Prometheus Operator, which is used to export the enabled metrics.
+func InstallPrometheusOperator() error {
+	url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
+	cmd := exec.Command("kubectl", "create", "-f", url)
+	_, err := Run(cmd)
+	return err
+}
+
+// Run executes the provided command from the project root directory and returns its combined output.
+func Run(cmd *exec.Cmd) ([]byte, error) {
+	dir, _ := GetProjectDir()
+	cmd.Dir = dir
+
+	if err := os.Chdir(cmd.Dir); err != nil {
+		fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err)
+	}
+
+	cmd.Env = append(os.Environ(), "GO111MODULE=on")
+	command := strings.Join(cmd.Args, " ")
+	fmt.Fprintf(GinkgoWriter, "running: %s\n", command)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output))
+	}
+
+	return output, nil
+}
+
+// UninstallPrometheusOperator uninstalls the Prometheus Operator bundle.
+func UninstallPrometheusOperator() {
+	url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
+	cmd := exec.Command("kubectl", "delete", "-f", url)
+	if _, err := Run(cmd); err != nil {
+		warnError(err)
+	}
+}
+
+// UninstallCertManager uninstalls the cert-manager bundle.
+func UninstallCertManager() {
+	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
+	cmd := exec.Command("kubectl", "delete", "-f", url)
+	if _, err := Run(cmd); err != nil {
+		warnError(err)
+	}
+}
+
+// InstallCertManager installs the cert-manager bundle.
+func InstallCertManager() error {
+	url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
+	cmd := exec.Command("kubectl", "apply", "-f", url)
+	if _, err := Run(cmd); err != nil {
+		return err
+	}
+	// Wait for cert-manager-webhook to be ready, which can take time if cert-manager
+	// was re-installed after uninstalling on a cluster.
+	cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
+		"--for", "condition=Available",
+		"--namespace", "cert-manager",
+		"--timeout", "5m",
+	)
+
+	_, err := Run(cmd)
+	return err
+}
+
+// LoadImageToKindClusterWithName loads a local docker image into the kind cluster.
+func LoadImageToKindClusterWithName(name string) error {
+	cluster := "kind"
+	if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+		cluster = v
+	}
+	kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+	cmd := exec.Command("kind", kindOptions...)
+	_, err := Run(cmd)
+	return err
+}
+
+// GetNonEmptyLines splits the given command output into individual lines
+// and ignores any empty lines in it.
+func GetNonEmptyLines(output string) []string {
+	var res []string
+	elements := strings.Split(output, "\n")
+	for _, element := range elements {
+		if element != "" {
+			res = append(res, element)
+		}
+	}
+
+	return res
+}
+
+// GetProjectDir returns the root directory of the project.
+func GetProjectDir() (string, error) {
+	wd, err := os.Getwd()
+	if err != nil {
+		return wd, err
+	}
+	wd = strings.Replace(wd, "/test/e2e", "", -1)
+	return wd, nil
+}
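
The scripts/split_installer.py file added above is a command-line tool that takes the path to a generated installer manifest as its only argument. A minimal invocation sketch, assuming the manifest has been rendered to a file named install.yaml (a hypothetical path; substitute the output of the project's kustomize build):

    python3 scripts/split_installer.py install.yaml

This writes cluster.yaml, namespaced.yaml and namespace.yaml next to the input file, as listed by the script's final print statements.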