From 72152a128085a1d202979e7b3725114a2f9184c0 Mon Sep 17 00:00:00 2001 From: Philipp Matthes Date: Wed, 27 Aug 2025 09:17:26 +0200 Subject: [PATCH 1/3] Schedule commitments with new reservations operator & crd --- .dockerignore | 2 + .github/workflows/push-charts.yaml | 18 + .github/workflows/push-images.yaml | 41 + .github/workflows/test.yaml | 70 +- .github/workflows/update-appversion.yml | 14 + .gitignore | 2 + Dockerfile.kubebuilder | 31 + Tiltfile | 20 +- cortex.secrets.example.yaml | 24 +- go.mod | 56 +- go.sum | 136 ++- helm/bundles/cortex-cinder/Chart.yaml | 2 +- helm/bundles/cortex-manila/Chart.yaml | 2 +- helm/bundles/cortex-nova/Chart.yaml | 2 +- helm/library/cortex-core/Chart.yaml | 2 +- .../cortex-core/templates/deployment.yaml | 2 + helm/library/cortex-core/templates/rbac.yaml | 18 + internal/conf/conf.go | 43 +- internal/conf/conf_test.go | 2 +- internal/conf/validation.go | 4 +- internal/conf/validation_test.go | 14 +- internal/mqtt/mqtt.go | 3 +- internal/scheduler/nova/api/messages.go | 8 +- .../shared/filter_has_enough_capacity.go | 90 +- .../shared/filter_has_enough_capacity_test.go | 332 +++--- internal/sync/openstack/nova/nova_types.go | 8 +- main.go | 2 +- .../dashboards/cortex-reservations.json | 1017 +++++++++++++++++ .../dashboards/cortex-reservations.license | 2 + reservations/LICENSE | 201 ++++ reservations/Makefile | 50 + reservations/PROJECT | 22 + reservations/api/LICENSE | 201 ++++ reservations/api/go.mod | 27 + reservations/api/go.sum | 100 ++ .../api/v1alpha1/computereservation_types.go | 117 ++ .../api/v1alpha1/groupversion_info.go | 23 + .../api/v1alpha1/zz_generated.deepcopy.go | 146 +++ reservations/cmd/main.go | 246 ++++ ...servations.cortex_computereservations.yaml | 163 +++ reservations/config/crd/kustomization.yaml | 16 + reservations/config/crd/kustomizeconfig.yaml | 19 + .../default/cert_metrics_manager_patch.yaml | 30 + .../config/default/kustomization.yaml | 234 ++++ .../config/default/manager_metrics_patch.yaml | 4 + .../config/default/metrics_service.yaml | 18 + .../config/manager/kustomization.yaml | 8 + reservations/config/manager/manager.yaml | 77 ++ .../network-policy/allow-metrics-traffic.yaml | 27 + .../config/network-policy/kustomization.yaml | 2 + .../config/prometheus/kustomization.yaml | 11 + reservations/config/prometheus/monitor.yaml | 27 + .../config/prometheus/monitor_tls_patch.yaml | 19 + .../rbac/computereservation_admin_role.yaml | 27 + .../rbac/computereservation_editor_role.yaml | 33 + .../rbac/computereservation_viewer_role.yaml | 29 + reservations/config/rbac/kustomization.yaml | 28 + .../config/rbac/leader_election_role.yaml | 40 + .../rbac/leader_election_role_binding.yaml | 15 + .../config/rbac/metrics_auth_role.yaml | 17 + .../rbac/metrics_auth_role_binding.yaml | 12 + .../config/rbac/metrics_reader_role.yaml | 9 + reservations/config/rbac/role.yaml | 32 + reservations/config/rbac/role_binding.yaml | 15 + reservations/config/rbac/service_account.yaml | 8 + reservations/dist/chart/.helmignore | 25 + reservations/dist/chart/Chart.lock | 6 + reservations/dist/chart/Chart.yaml | 14 + .../dist/chart/charts/owner-info-1.0.0.tgz | Bin 0 -> 2139 bytes .../dist/chart/templates/_helpers.tpl | 50 + .../templates/certmanager/certificate.yaml | 36 + ...servations.cortex_computereservations.yaml | 170 +++ .../dist/chart/templates/manager/manager.yaml | 107 ++ .../templates/metrics/metrics-service.yaml | 18 + .../network-policy/allow-metrics-traffic.yaml | 28 + .../chart/templates/prometheus/monitor.yaml | 40 + 
.../rbac/computereservation_admin_role.yaml | 28 + .../rbac/computereservation_editor_role.yaml | 34 + .../rbac/computereservation_viewer_role.yaml | 30 + .../templates/rbac/leader_election_role.yaml | 42 + .../rbac/leader_election_role_binding.yaml | 17 + .../templates/rbac/metrics_auth_role.yaml | 21 + .../rbac/metrics_auth_role_binding.yaml | 16 + .../templates/rbac/metrics_reader_role.yaml | 13 + .../dist/chart/templates/rbac/role.yaml | 36 + .../chart/templates/rbac/role_binding.yaml | 16 + .../chart/templates/rbac/service_account.yaml | 15 + reservations/dist/chart/values.yaml | 129 +++ reservations/go.mod | 115 ++ reservations/go.sum | 388 +++++++ reservations/hack/boilerplate.go.txt | 2 + reservations/internal/controller/conf.go | 22 + reservations/internal/controller/conf_test.go | 128 +++ .../internal/controller/controller.go | 193 ++++ .../internal/controller/controller_test.go | 456 ++++++++ reservations/internal/controller/monitor.go | 106 ++ .../internal/controller/monitor_test.go | 431 +++++++ reservations/internal/controller/syncer.go | 484 ++++++++ .../internal/controller/syncer_test.go | 604 ++++++++++ 99 files changed, 7580 insertions(+), 260 deletions(-) create mode 100644 Dockerfile.kubebuilder create mode 100644 helm/library/cortex-core/templates/rbac.yaml create mode 100644 plutono/provisioning/dashboards/cortex-reservations.json create mode 100644 plutono/provisioning/dashboards/cortex-reservations.license create mode 100644 reservations/LICENSE create mode 100644 reservations/Makefile create mode 100644 reservations/PROJECT create mode 100644 reservations/api/LICENSE create mode 100644 reservations/api/go.mod create mode 100644 reservations/api/go.sum create mode 100644 reservations/api/v1alpha1/computereservation_types.go create mode 100644 reservations/api/v1alpha1/groupversion_info.go create mode 100644 reservations/api/v1alpha1/zz_generated.deepcopy.go create mode 100644 reservations/cmd/main.go create mode 100644 reservations/config/crd/bases/reservations.cortex_computereservations.yaml create mode 100644 reservations/config/crd/kustomization.yaml create mode 100644 reservations/config/crd/kustomizeconfig.yaml create mode 100644 reservations/config/default/cert_metrics_manager_patch.yaml create mode 100644 reservations/config/default/kustomization.yaml create mode 100644 reservations/config/default/manager_metrics_patch.yaml create mode 100644 reservations/config/default/metrics_service.yaml create mode 100644 reservations/config/manager/kustomization.yaml create mode 100644 reservations/config/manager/manager.yaml create mode 100644 reservations/config/network-policy/allow-metrics-traffic.yaml create mode 100644 reservations/config/network-policy/kustomization.yaml create mode 100644 reservations/config/prometheus/kustomization.yaml create mode 100644 reservations/config/prometheus/monitor.yaml create mode 100644 reservations/config/prometheus/monitor_tls_patch.yaml create mode 100644 reservations/config/rbac/computereservation_admin_role.yaml create mode 100644 reservations/config/rbac/computereservation_editor_role.yaml create mode 100644 reservations/config/rbac/computereservation_viewer_role.yaml create mode 100644 reservations/config/rbac/kustomization.yaml create mode 100644 reservations/config/rbac/leader_election_role.yaml create mode 100644 reservations/config/rbac/leader_election_role_binding.yaml create mode 100644 reservations/config/rbac/metrics_auth_role.yaml create mode 100644 reservations/config/rbac/metrics_auth_role_binding.yaml create mode 
100644 reservations/config/rbac/metrics_reader_role.yaml create mode 100644 reservations/config/rbac/role.yaml create mode 100644 reservations/config/rbac/role_binding.yaml create mode 100644 reservations/config/rbac/service_account.yaml create mode 100644 reservations/dist/chart/.helmignore create mode 100644 reservations/dist/chart/Chart.lock create mode 100644 reservations/dist/chart/Chart.yaml create mode 100644 reservations/dist/chart/charts/owner-info-1.0.0.tgz create mode 100644 reservations/dist/chart/templates/_helpers.tpl create mode 100644 reservations/dist/chart/templates/certmanager/certificate.yaml create mode 100644 reservations/dist/chart/templates/crd/reservations.cortex_computereservations.yaml create mode 100644 reservations/dist/chart/templates/manager/manager.yaml create mode 100644 reservations/dist/chart/templates/metrics/metrics-service.yaml create mode 100644 reservations/dist/chart/templates/network-policy/allow-metrics-traffic.yaml create mode 100644 reservations/dist/chart/templates/prometheus/monitor.yaml create mode 100644 reservations/dist/chart/templates/rbac/computereservation_admin_role.yaml create mode 100644 reservations/dist/chart/templates/rbac/computereservation_editor_role.yaml create mode 100644 reservations/dist/chart/templates/rbac/computereservation_viewer_role.yaml create mode 100644 reservations/dist/chart/templates/rbac/leader_election_role.yaml create mode 100644 reservations/dist/chart/templates/rbac/leader_election_role_binding.yaml create mode 100644 reservations/dist/chart/templates/rbac/metrics_auth_role.yaml create mode 100644 reservations/dist/chart/templates/rbac/metrics_auth_role_binding.yaml create mode 100644 reservations/dist/chart/templates/rbac/metrics_reader_role.yaml create mode 100644 reservations/dist/chart/templates/rbac/role.yaml create mode 100644 reservations/dist/chart/templates/rbac/role_binding.yaml create mode 100644 reservations/dist/chart/templates/rbac/service_account.yaml create mode 100644 reservations/dist/chart/values.yaml create mode 100644 reservations/go.mod create mode 100644 reservations/go.sum create mode 100644 reservations/hack/boilerplate.go.txt create mode 100644 reservations/internal/controller/conf.go create mode 100644 reservations/internal/controller/conf_test.go create mode 100644 reservations/internal/controller/controller.go create mode 100644 reservations/internal/controller/controller_test.go create mode 100644 reservations/internal/controller/monitor.go create mode 100644 reservations/internal/controller/monitor_test.go create mode 100644 reservations/internal/controller/syncer.go create mode 100644 reservations/internal/controller/syncer_test.go diff --git a/.dockerignore b/.dockerignore index b1491128c..f75123e78 100644 --- a/.dockerignore +++ b/.dockerignore @@ -21,3 +21,5 @@ /report.html /shell.nix /testing/ + +bin/ \ No newline at end of file diff --git a/.github/workflows/push-charts.yaml b/.github/workflows/push-charts.yaml index 1118845c7..69ca46887 100644 --- a/.github/workflows/push-charts.yaml +++ b/.github/workflows/push-charts.yaml @@ -62,3 +62,21 @@ jobs: CHART_PACKAGE=$(ls $CHART_DIR/*.tgz) helm push $CHART_PACKAGE oci://${{ env.REGISTRY }}/${{ github.repository }}/charts/ done + - name: Get all changed reservations Chart.yaml files + id: changed-chart-yaml-files-reservations + uses: tj-actions/changed-files@v46 + with: + files: | + reservations/dist/chart/Chart.yaml + - name: Push reservations charts to registry + if: 
steps.changed-chart-yaml-files-reservations.outputs.all_changed_files != '' + shell: bash + env: + ALL_CHANGED_FILES: ${{ steps.changed-chart-yaml-files-reservations.outputs.all_changed_files }} + run: | + for CHART_FILE in ${ALL_CHANGED_FILES}; do + CHART_DIR=$(dirname $CHART_FILE) + helm package $CHART_DIR --dependency-update --destination $CHART_DIR + CHART_PACKAGE=$(ls $CHART_DIR/*.tgz) + helm push $CHART_PACKAGE oci://${{ env.REGISTRY }}/${{ github.repository }}/charts/ + done diff --git a/.github/workflows/push-images.yaml b/.github/workflows/push-images.yaml index c42a39aea..175637808 100644 --- a/.github/workflows/push-images.yaml +++ b/.github/workflows/push-images.yaml @@ -98,3 +98,44 @@ jobs: subject-name: ${{ env.REGISTRY }}/${{ github.repository }} subject-digest: ${{ steps.push_cortex.outputs.digest }} push-to-registry: true + # Only build and push the reservations operator image if there are changes + # in the reservations directory. + - name: Get all changed reservations/ files + id: changed_reservations_files + uses: tj-actions/changed-files@v46 + with: + files: | + reservations/** + - name: Docker Meta (Cortex Reservations) + if: steps.changed_reservations_files.outputs.all_changed_files != '' + id: meta_cortex_reservations + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ github.repository }}-reservations-operator + tags: | + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=sha + latest + - name: Build and Push Cortex Reservations Operator + if: steps.changed_reservations_files.outputs.all_changed_files != '' + id: push_cortex_reservations + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.kubebuilder + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta_cortex_reservations.outputs.tags }} + labels: ${{ steps.meta_cortex_reservations.outputs.labels }} + build-args: | + GO_MOD_PATH=reservations + GIT_TAG=${{ github.ref_name }} + GIT_COMMIT=${{ github.sha }} + - name: Generate Artifact Attestation for Cortex Reservations + if: steps.changed_reservations_files.outputs.all_changed_files != '' + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.REGISTRY }}/${{ github.repository }}-reservations-operator + subject-digest: ${{ steps.push_cortex_reservations.outputs.digest }} + push-to-registry: true diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index af15235ad..4f0dbdc0c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -20,7 +20,11 @@ jobs: with: go-version: 1.25.0 - name: Test quickly without Docker - run: go test -v ./... + run: | + echo "Testing main module..." + go test -v ./... + echo "Testing reservations module..." + cd reservations && go test -v ./... test-with-docker: # We don't need to run this longer test if the previous one already failed. @@ -43,15 +47,27 @@ jobs: export GITHUB_ACTIONS=1 export POSTGRES_CONTAINER=1 export RABBITMQ_CONTAINER=1 + + echo "Running tests for main module..." go test -v \ -coverpkg=./internal/... \ -coverprofile=pr_profile.cov ./internal/... go tool cover -func pr_profile.cov > pr_func_coverage.txt - - name: Upload coverage file + + echo "Running tests for reservations module..." + cd reservations + go test -v \ + -coverpkg=./internal/... \ + -coverprofile=reservations_profile.cov ./internal/... + go tool cover -func reservations_profile.cov > reservations_func_coverage.txt + cd .. 
+ - name: Upload coverage files uses: actions/upload-artifact@v4 with: name: pr-func-coverage - path: pr_func_coverage.txt + path: | + pr_func_coverage.txt + reservations/reservations_func_coverage.txt # Steps below are only executed if the workflow is triggered by a pull request - name: Delete old coverage comments (PR only) if: ${{ github.event_name == 'pull_request' }} @@ -73,7 +89,7 @@ jobs: }); } } - - name: Download coverage file (PR only) + - name: Download coverage files (PR only) if: ${{ github.event_name == 'pull_request' }} uses: actions/download-artifact@v5 with: @@ -87,20 +103,46 @@ jobs: const fs = require('fs'); const path = require('path'); - // Extract the last line of the coverage report - const coverageReport = fs.readFileSync('pr_func_coverage.txt', 'utf8'); - const lines = coverageReport.trim().split('\n'); - const lastLine = lines[lines.length - 1]; - const coverageMatch = lastLine.match(/total:\s+\(statements\)\s+(\d+\.\d+)%/); - const coveragePercentage = coverageMatch ? coverageMatch[1] : 'unknown'; + // Read main module coverage report + const mainCoverageReport = fs.readFileSync('pr_func_coverage.txt', 'utf8'); + const mainLines = mainCoverageReport.trim().split('\n'); + const mainLastLine = mainLines[mainLines.length - 1]; + const mainCoverageMatch = mainLastLine.match(/total:\s+\(statements\)\s+(\d+\.\d+)%/); + const mainCoveragePercentage = mainCoverageMatch ? mainCoverageMatch[1] : 'unknown'; + + // Read reservations module coverage report + let reservationsCoverageReport = ''; + let reservationsCoveragePercentage = 'unknown'; + try { + reservationsCoverageReport = fs.readFileSync('reservations_func_coverage.txt', 'utf8'); + const reservationsLines = reservationsCoverageReport.trim().split('\n'); + const reservationsLastLine = reservationsLines[reservationsLines.length - 1]; + const reservationsCoverageMatch = reservationsLastLine.match(/total:\s+\(statements\)\s+(\d+\.\d+)%/); + reservationsCoveragePercentage = reservationsCoverageMatch ? reservationsCoverageMatch[1] : 'unknown'; + } catch (error) { + reservationsCoverageReport = 'No coverage data available'; + } let commentBody = '\n'; + commentBody += '## Test Coverage Report\n\n'; + + // Main module coverage commentBody += '
<details>\n';
-            commentBody += 'Coverage in go module internal/: ';
-            commentBody += coveragePercentage;
+            commentBody += 'Coverage in main module (internal/): ';
+            commentBody += mainCoveragePercentage;
             commentBody += '%\n\n';
             commentBody += '```text\n';
-            commentBody += coverageReport;
+            commentBody += mainCoverageReport;
             commentBody += '```\n';
+            commentBody += '</details>\n\n';
+
+            // Reservations module coverage
+            commentBody += '<details>\n';
+            commentBody += 'Coverage in reservations module (reservations/internal/): ';
+            commentBody += reservationsCoveragePercentage;
+            commentBody += '%\n\n';
+            commentBody += '```text\n';
+            commentBody += reservationsCoverageReport;
             commentBody += '```\n';
             commentBody += '</details>
\n'; @@ -110,4 +152,4 @@ jobs: owner: context.repo.owner, repo: context.repo.repo, body: commentBody, - }); \ No newline at end of file + }); diff --git a/.github/workflows/update-appversion.yml b/.github/workflows/update-appversion.yml index 17c414e53..1a6fd96d2 100644 --- a/.github/workflows/update-appversion.yml +++ b/.github/workflows/update-appversion.yml @@ -55,3 +55,17 @@ jobs: git add helm/library/cortex-postgres/Chart.yaml git commit -m "Bump cortex-postgres chart appVersions to ${{ steps.vars.outputs.sha }} [skip ci]" || echo "No changes to commit" git push origin HEAD:main + + # Only bumped if there are changes in the reservations directory. + - name: Update appVersion in cortex-reservations Chart.yaml + if: steps.changed_reservations_files.outputs.all_changed_files != '' + run: | + sed -i 's/^\([ ]*appVersion:[ ]*\).*/\1"${{ steps.vars.outputs.sha }}"/' reservations/dist/chart/Chart.yaml + - name: Commit and push changes for cortex-reservations + if: steps.changed_reservations_files.outputs.all_changed_files != '' + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add reservations/dist/chart/Chart.yaml + git commit -m "Bump cortex-reservations chart appVersions to ${{ steps.vars.outputs.sha }} [skip ci]" || echo "No changes to commit" + git push origin HEAD:main diff --git a/.gitignore b/.gitignore index 09f9d5996..dc976bb80 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,8 @@ build/** # Test binary, built with `go test -c` *.test +bin/ + # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/Dockerfile.kubebuilder b/Dockerfile.kubebuilder new file mode 100644 index 000000000..aa302e4da --- /dev/null +++ b/Dockerfile.kubebuilder @@ -0,0 +1,31 @@ +# Build the manager binary +FROM golang:1.24 AS builder +ARG TARGETOS +ARG TARGETARCH +# Path of our go.mod +ARG GO_MOD_PATH=. + +WORKDIR /workspace +# Copy shared cortex code +COPY . / +# Copy the Go Modules manifests +COPY ${GO_MOD_PATH} . +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/Tiltfile b/Tiltfile index 7d0582c04..73f1f2448 100644 --- a/Tiltfile +++ b/Tiltfile @@ -11,6 +11,7 @@ if not os.getenv('TILT_VALUES_PATH'): fail("TILT_VALUES_PATH is not set.") if not os.path.exists(os.getenv('TILT_VALUES_PATH')): fail("TILT_VALUES_PATH "+ os.getenv('TILT_VALUES_PATH') + " does not exist.") +tilt_values = os.getenv('TILT_VALUES_PATH') # The upgrade job may take a long time to run, so it is disabled by default. 
enable_postgres_upgrade = False @@ -22,6 +23,23 @@ helm_repo( labels=['Repositories'], ) +def kubebuilder_binary_files(path): + """ + Return all usual binary files in a kubebuilder operator path. + Can be used to perform selective watching on code paths for docker builds. + """ + return [path + '/cmd', path + '/api', path + '/internal', path + '/go.mod', path + '/go.sum'] + +########### Reservations Operator & CRDs +docker_build('ghcr.io/cobaltcore-dev/cortex-reservations-operator', '.', + dockerfile='Dockerfile.kubebuilder', + build_args={'GO_MOD_PATH': 'reservations'}, + only=kubebuilder_binary_files('reservations') + ['internal/', 'go.mod', 'go.sum'], +) +local('sh helm/sync.sh reservations/dist/chart') +k8s_yaml(helm('reservations/dist/chart', name='cortex-reservations', values=[tilt_values])) +k8s_resource('reservations-controller-manager', labels=['Reservations']) + ########### Dev Dependencies local('sh helm/sync.sh helm/dev/cortex-prometheus-operator') k8s_yaml(helm('./helm/dev/cortex-prometheus-operator', name='cortex-prometheus-operator')) # Operator @@ -58,9 +76,9 @@ k8s_resource('cortex-plutono', port_forwards=[ ], labels=['Monitoring']) ########### Cortex Bundles -tilt_values = os.getenv('TILT_VALUES_PATH') docker_build('ghcr.io/cobaltcore-dev/cortex', '.', only=[ 'internal/', 'commands/', 'main.go', 'go.mod', 'go.sum', 'Makefile', + 'reservations/api/', # API module of the reservations operator needed for the scheduler. ]) docker_build('ghcr.io/cobaltcore-dev/cortex-postgres', 'postgres') diff --git a/cortex.secrets.example.yaml b/cortex.secrets.example.yaml index 22627bb56..7c6aa6438 100644 --- a/cortex.secrets.example.yaml +++ b/cortex.secrets.example.yaml @@ -20,6 +20,21 @@ sharedSSOCert: &sharedSSOCert # If true, the certificate is not verified. selfSigned: false +# Shared keystone credentials to use. +keystone: &keystone: + url: https://path-to-keystone/v3 + sso: *sharedSSOCert + username: openstack-user-with-all-project-read-access + password: openstack-user-password + projectName: openstack-project-of-user + userDomainName: openstack-domain-of-user + projectDomainName: openstack-domain-of-project-scoped-to + +# Custom configuration for the reservations operator. +reservations: + secrets: + keystone: *keystone + # These values will be shared across cortex-nova, cortex-manila, ... locally. cortex-core: secrets: @@ -42,11 +57,4 @@ cortex-core: sso: *sharedSSOCert provides: [netapp_aggregate_labels_metric, netapp_node_metric] # Override the endpoints and credentials to your OpenStack. 
- keystone: - url: https://path-to-keystone/v3 - sso: *sharedSSOCert - username: openstack-user-with-all-project-read-access - password: openstack-user-password - projectName: openstack-project-of-user - userDomainName: openstack-domain-of-user - projectDomainName: openstack-domain-of-project-scoped-to + keystone: *keystone diff --git a/go.mod b/go.mod index 3d4859a46..5c39fe57f 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,17 @@ module github.com/cobaltcore-dev/cortex -go 1.24 +go 1.24.0 -replace github.com/cobaltcore-dev/cortex/commands => ./commands +toolchain go1.24.2 -replace github.com/cobaltcore-dev/cortex/testlib => ./testlib +replace ( + github.com/cobaltcore-dev/cortex/commands => ./commands + github.com/cobaltcore-dev/cortex/reservations/api => ./reservations/api + github.com/cobaltcore-dev/cortex/testlib => ./testlib +) require ( + github.com/cobaltcore-dev/cortex/reservations/api v0.0.0-00010101000000-000000000000 github.com/dlmiddlecote/sqlstats v1.0.2 github.com/eclipse/paho.mqtt.golang v1.5.0 github.com/go-gorp/gorp v2.2.0+incompatible @@ -26,20 +31,59 @@ require ( github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/containerd/continuity v0.4.5 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-sql-driver/mysql v1.9.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/sys/user v0.4.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runc v1.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/poy/onpar v1.1.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/ziutek/mymysql v1.5.4 // indirect golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools v2.2.0+incompatible // indirect + k8s.io/api v0.33.0 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/client-go v0.33.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi 
v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) require ( @@ -47,7 +91,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/golang-migrate/migrate/v4 v4.18.3 // indirect - github.com/gorilla/websocket v1.5.3 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -57,7 +101,9 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sync v0.15.0 // indirect + golang.org/x/sync v0.16.0 // indirect golang.org/x/sys v0.35.0 // indirect google.golang.org/protobuf v1.36.7 // indirect + k8s.io/apimachinery v0.33.4 + sigs.k8s.io/controller-runtime v0.21.0 ) diff --git a/go.sum b/go.sum index daeef1200..a4e30b9f3 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -34,6 +36,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -53,10 +56,20 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/eclipse/paho.mqtt.golang v1.5.0 h1:EH+bUVJNgttidWFkLLVKaQPGmkTUfQQqjOsyvMGvD6o= github.com/eclipse/paho.mqtt.golang v1.5.0/go.mod h1:du/2qNQVqJf/Sqs4MEL77kR8QTqANF7XU7Fk0aOTAgk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= 
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gorp/gorp v2.2.0+incompatible h1:xAUh4QgEeqPPhK3vxZN+bzrim1z5Av6q837gtjUlshc= github.com/go-gorp/gorp v2.2.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E= @@ -68,9 +81,21 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -84,16 +109,27 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo= github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -107,23 +143,35 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck 
v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -140,9 +188,12 @@ github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -150,6 +201,11 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m 
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -195,6 +251,8 @@ github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7D github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sapcc/go-api-declarations v1.17.3 h1:ILRfsFD9ChSeekyzlDInLMqjC830gBcyK6ULlFdP45I= github.com/sapcc/go-api-declarations v1.17.3/go.mod h1:MWmLjmvjftgyAugNUfIhsDsHIzXH1pn32cWLZpiluKg= @@ -214,19 +272,32 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -248,13 +319,19 @@ go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwE go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -264,16 +341,20 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -284,20 +365,36 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools 
v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -307,6 +404,12 @@ google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -317,3 +420,28 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.33.0 
h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/helm/bundles/cortex-cinder/Chart.yaml b/helm/bundles/cortex-cinder/Chart.yaml index 5653317d6..d78ccfafe 100644 --- a/helm/bundles/cortex-cinder/Chart.yaml +++ b/helm/bundles/cortex-cinder/Chart.yaml @@ -10,7 +10,7 @@ appVersion: 0.1.0 dependencies: - name: cortex-core repository: oci://ghcr.io/cobaltcore-dev/cortex/charts - version: 0.24.8 + version: 0.24.9 - name: cortex-postgres repository: oci://ghcr.io/cobaltcore-dev/cortex/charts version: 0.5.3 diff --git a/helm/bundles/cortex-manila/Chart.yaml b/helm/bundles/cortex-manila/Chart.yaml index bb9a26552..dfbeac341 100644 --- a/helm/bundles/cortex-manila/Chart.yaml +++ b/helm/bundles/cortex-manila/Chart.yaml @@ -10,7 +10,7 @@ appVersion: 0.1.0 dependencies: - name: cortex-core repository: oci://ghcr.io/cobaltcore-dev/cortex/charts - version: 0.24.8 + version: 0.24.9 - name: cortex-postgres repository: oci://ghcr.io/cobaltcore-dev/cortex/charts version: 0.5.3 diff --git a/helm/bundles/cortex-nova/Chart.yaml b/helm/bundles/cortex-nova/Chart.yaml index 8a041d9a5..2e2f14875 100644 --- a/helm/bundles/cortex-nova/Chart.yaml +++ b/helm/bundles/cortex-nova/Chart.yaml @@ -10,7 +10,7 @@ appVersion: 0.1.0 dependencies: - name: cortex-core repository: oci://ghcr.io/cobaltcore-dev/cortex/charts - version: 0.24.8 + version: 0.24.9 - name: cortex-postgres repository: 
oci://ghcr.io/cobaltcore-dev/cortex/charts version: 0.5.3 diff --git a/helm/library/cortex-core/Chart.yaml b/helm/library/cortex-core/Chart.yaml index b4cd74356..f21b8dca7 100644 --- a/helm/library/cortex-core/Chart.yaml +++ b/helm/library/cortex-core/Chart.yaml @@ -5,5 +5,5 @@ apiVersion: v2 name: cortex-core description: A Helm chart deploying Cortex core services. type: application -version: 0.24.8 appVersion: "sha-2eb529c" +version: 0.24.9 diff --git a/helm/library/cortex-core/templates/deployment.yaml b/helm/library/cortex-core/templates/deployment.yaml index fbd8c2551..9b1038870 100644 --- a/helm/library/cortex-core/templates/deployment.yaml +++ b/helm/library/cortex-core/templates/deployment.yaml @@ -27,6 +27,7 @@ spec: app: {{ include "cortex.fullname" $ }}-{{ .name }} {{- include "cortex.labels" $ | nindent 8 }} spec: + serviceAccountName: {{ include "cortex.fullname" $ }} containers: - name: {{ include "cortex.fullname" $ }}-{{ .name }} args: {{ .args | toJson }} @@ -83,6 +84,7 @@ spec: app: {{ include "cortex.fullname" $ }}-cli {{- include "cortex.labels" $ | nindent 8 }} spec: + serviceAccountName: {{ include "cortex.fullname" $ }} containers: - name: {{ include "cortex.fullname" $ }}-cli command: diff --git a/helm/library/cortex-core/templates/rbac.yaml b/helm/library/cortex-core/templates/rbac.yaml new file mode 100644 index 000000000..96c1814e7 --- /dev/null +++ b/helm/library/cortex-core/templates/rbac.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "cortex.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + # From the reservations operator. + name: computereservation-viewer-role +subjects: +- kind: ServiceAccount + name: {{ include "cortex.fullname" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "cortex.fullname" . }} diff --git a/internal/conf/conf.go b/internal/conf/conf.go index 9756ae919..d07d53fc9 100644 --- a/internal/conf/conf.go +++ b/internal/conf/conf.go @@ -417,7 +417,7 @@ type Config interface { Validate() error } -type config struct { +type SharedConfig struct { // The checks to run, in this particular order. Checks []string `json:"checks"` @@ -435,8 +435,13 @@ type config struct { } // Create a new configuration from the default config json file. -// Also inject environment variables into the configuration. -func NewConfig() Config { +// +// This will read two files: +// - /etc/config/conf.json +// - /etc/secrets/secrets.json +// +// The values read from secrets.json will override the values in conf.json +func NewConfig[C any]() C { // Note: We need to read the config as a raw map first, to avoid golang // unmarshalling default values for the fields. @@ -450,10 +455,10 @@ func NewConfig() Config { if err != nil { panic(err) } - return newConfigFromMaps(cmConf, secretConf) + return newConfigFromMaps[C](cmConf, secretConf) } -func newConfigFromMaps(base, override map[string]any) Config { +func newConfigFromMaps[C any](base, override map[string]any) C { // Merge the base config with the override config. mergedConf := mergeMaps(base, override) // Marshal again, and then unmarshal into the config struct. @@ -461,11 +466,11 @@ func newConfigFromMaps(base, override map[string]any) Config { if err != nil { panic(err) } - var c config + var c C if err := json.Unmarshal(mergedBytes, &c); err != nil { panic(err) } - return &c + return c } // Read the json as a map from the given file path. 
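A minimal usage sketch of the generic loader above (illustrative only, not part of this patch): callers now pick the concrete config type that the merged conf.json/secrets.json is unmarshalled into, as main.go and mqtt.go further below do with *conf.SharedConfig. The MyServiceConfig wrapper and its extra_option field are hypothetical, and the files under /etc/config and /etc/secrets are assumed to exist.

package main

import (
	"fmt"

	"github.com/cobaltcore-dev/cortex/internal/conf"
)

// Hypothetical service-specific config embedding the shared cortex fields.
// The embedded SharedConfig is filled from the same merged conf.json/secrets.json maps.
type MyServiceConfig struct {
	conf.SharedConfig
	ExtraOption string `json:"extra_option"` // hypothetical extra field, for illustration only
}

func main() {
	// Load the shared configuration, as main.go in this patch does.
	shared := conf.NewConfig[*conf.SharedConfig]()
	fmt.Println("configured checks:", shared.GetChecks())

	// Or unmarshal the merged maps into a wrapper type (illustrative only).
	custom := conf.NewConfig[MyServiceConfig]()
	fmt.Println("extra option:", custom.ExtraOption)
}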
@@ -513,15 +518,15 @@ func mergeMaps(dst, src map[string]any) map[string]any { return result } -func (c *config) GetChecks() []string { return c.Checks } -func (c *config) GetLoggingConfig() LoggingConfig { return c.LoggingConfig } -func (c *config) GetDBConfig() DBConfig { return c.DBConfig } -func (c *config) GetSyncConfig() SyncConfig { return c.SyncConfig } -func (c *config) GetExtractorConfig() ExtractorConfig { return c.ExtractorConfig } -func (c *config) GetSchedulerConfig() SchedulerConfig { return c.SchedulerConfig } -func (c *config) GetDeschedulerConfig() DeschedulerConfig { return c.DeschedulerConfig } -func (c *config) GetKPIsConfig() KPIsConfig { return c.KPIsConfig } -func (c *config) GetMonitoringConfig() MonitoringConfig { return c.MonitoringConfig } -func (c *config) GetMQTTConfig() MQTTConfig { return c.MQTTConfig } -func (c *config) GetAPIConfig() APIConfig { return c.APIConfig } -func (c *config) GetKeystoneConfig() KeystoneConfig { return c.KeystoneConfig } +func (c *SharedConfig) GetChecks() []string { return c.Checks } +func (c *SharedConfig) GetLoggingConfig() LoggingConfig { return c.LoggingConfig } +func (c *SharedConfig) GetDBConfig() DBConfig { return c.DBConfig } +func (c *SharedConfig) GetSyncConfig() SyncConfig { return c.SyncConfig } +func (c *SharedConfig) GetExtractorConfig() ExtractorConfig { return c.ExtractorConfig } +func (c *SharedConfig) GetSchedulerConfig() SchedulerConfig { return c.SchedulerConfig } +func (c *SharedConfig) GetDeschedulerConfig() DeschedulerConfig { return c.DeschedulerConfig } +func (c *SharedConfig) GetKPIsConfig() KPIsConfig { return c.KPIsConfig } +func (c *SharedConfig) GetMonitoringConfig() MonitoringConfig { return c.MonitoringConfig } +func (c *SharedConfig) GetMQTTConfig() MQTTConfig { return c.MQTTConfig } +func (c *SharedConfig) GetAPIConfig() APIConfig { return c.APIConfig } +func (c *SharedConfig) GetKeystoneConfig() KeystoneConfig { return c.KeystoneConfig } diff --git a/internal/conf/conf_test.go b/internal/conf/conf_test.go index 5afa97c80..23e7ff34c 100644 --- a/internal/conf/conf_test.go +++ b/internal/conf/conf_test.go @@ -126,7 +126,7 @@ func TestNewConfig(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - config := newConfigFromMaps(rawConfig, nil) + config := newConfigFromMaps[*SharedConfig](rawConfig, nil) // Test SyncConfig syncConfig := config.GetSyncConfig() diff --git a/internal/conf/validation.go b/internal/conf/validation.go index 4f35150ce..7be8e52c8 100644 --- a/internal/conf/validation.go +++ b/internal/conf/validation.go @@ -33,7 +33,7 @@ type DependencyConfig struct { } // Validate if the dependencies are satisfied in the given config. -func (deps *DependencyConfig) validate(c config) error { +func (deps *DependencyConfig) validate(c SharedConfig) error { confedNovaObjects := make(map[string]bool) for _, objectType := range c.OpenStack.Nova.Types { confedNovaObjects[objectType] = true @@ -88,7 +88,7 @@ func (deps *DependencyConfig) validate(c config) error { } // Check if all dependencies are satisfied. 
-func (c *config) Validate() error { +func (c *SharedConfig) Validate() error { for _, extractor := range c.ExtractorConfig.Plugins { if err := extractor.validate(*c); err != nil { return err diff --git a/internal/conf/validation_test.go b/internal/conf/validation_test.go index 91bfb801b..f87bd42b2 100644 --- a/internal/conf/validation_test.go +++ b/internal/conf/validation_test.go @@ -87,7 +87,7 @@ func TestValidConf(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if err := conf.Validate(); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -138,7 +138,7 @@ func TestInvalidConf_MissingNovaDependency(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if err := conf.Validate(); err == nil { t.Fatalf("expected error, got nil") } @@ -162,7 +162,7 @@ func TestInvalidConf_MissingResourceProviders(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if err := conf.Validate(); err == nil { t.Fatalf("expected error, got nil") } @@ -184,7 +184,7 @@ func TestInvalidConf_InvalidServiceAvailability(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if err := conf.Validate(); err == nil { t.Fatalf("expected error, got nil") } @@ -210,7 +210,7 @@ func TestInvalidConf_MissingHost(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if err := conf.Validate(); err == nil { t.Fatalf("expected error, got nil") } @@ -237,7 +237,7 @@ func TestInvalidConf_MissingFeatureForKPI(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if len(conf.GetKPIsConfig().Plugins) == 0 { t.Fatalf("expected plugins, got none") } @@ -266,7 +266,7 @@ func TestInvalidConf_NovaSchedulerDependency(t *testing.T) { if err != nil { t.Fatalf("Failed to read config: %v", err) } - conf := newConfigFromMaps(rawConf, nil) + conf := newConfigFromMaps[*SharedConfig](rawConf, nil) if err := conf.Validate(); err == nil { t.Fatalf("expected error, got nil") } diff --git a/internal/mqtt/mqtt.go b/internal/mqtt/mqtt.go index 8af0b6f25..b8f039f01 100644 --- a/internal/mqtt/mqtt.go +++ b/internal/mqtt/mqtt.go @@ -36,7 +36,8 @@ type client struct { } func NewClient(monitor Monitor) Client { - return NewClientWithConfig(conf.NewConfig().GetMQTTConfig(), monitor) + c := conf.NewConfig[conf.SharedConfig]() + return NewClientWithConfig(c.GetMQTTConfig(), monitor) } func NewClientWithConfig(conf conf.MQTTConfig, monitor Monitor) Client { diff --git a/internal/scheduler/nova/api/messages.go b/internal/scheduler/nova/api/messages.go index a94408756..47c679b7c 100644 --- a/internal/scheduler/nova/api/messages.go +++ b/internal/scheduler/nova/api/messages.go @@ -199,10 +199,10 @@ type NovaImageMeta struct { type NovaFlavor struct { ID int `json:"id"` Name string `json:"name"` - MemoryMB int `json:"memory_mb"` - VCPUs int `json:"vcpus"` - RootGB int `json:"root_gb"` - EphemeralGB int `json:"ephemeral_gb"` + MemoryMB uint64 
`json:"memory_mb"` + VCPUs uint64 `json:"vcpus"` + RootGB uint64 `json:"root_gb"` + EphemeralGB uint64 `json:"ephemeral_gb"` FlavorID string `json:"flavorid"` Swap int `json:"swap"` RXTXFactor float64 `json:"rxtx_factor"` diff --git a/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity.go b/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity.go index 5173ac2a9..f427e479c 100644 --- a/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity.go +++ b/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity.go @@ -4,15 +4,47 @@ package shared import ( + "context" "log/slog" + "github.com/cobaltcore-dev/cortex/internal/conf" + "github.com/cobaltcore-dev/cortex/internal/db" "github.com/cobaltcore-dev/cortex/internal/extractor/plugins/shared" "github.com/cobaltcore-dev/cortex/internal/scheduler" "github.com/cobaltcore-dev/cortex/internal/scheduler/nova/api" + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" ) type FilterHasEnoughCapacity struct { scheduler.BaseStep[api.ExternalSchedulerRequest, scheduler.EmptyStepOpts] + + // Kubernetes client. + Client client.Client +} + +func (s *FilterHasEnoughCapacity) Init(alias string, db db.DB, opts conf.RawOpts) error { + if err := s.BaseStep.Init(alias, db, opts); err != nil { + return err + } + if s.Client != nil { + return nil // Already initialized. + } + scheme, err := v1alpha1.SchemeBuilder.Build() + if err != nil { + return err + } + clientConfig, err := ctrl.GetConfig() + if err != nil { + return err + } + cl, err := client.New(clientConfig, client.Options{Scheme: scheme}) + if err != nil { + return err + } + s.Client = cl + return nil } func (s *FilterHasEnoughCapacity) GetName() string { return "filter_has_enough_capacity" } @@ -27,35 +59,75 @@ func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.Externa ); err != nil { return nil, err } + var reservations v1alpha1.ComputeReservationList + ctx := context.Background() + if err := s.Client.List(ctx, &reservations); err != nil { + return nil, err + } + // Resources reserved by hosts. + vcpusReserved := make(map[string]uint64) // in vCPUs + memoryReserved := make(map[string]uint64) // in MB + diskReserved := make(map[string]uint64) // in GB + for _, reservation := range reservations.Items { + if reservation.Status.Phase != v1alpha1.ComputeReservationStatusPhaseActive { + continue // Only consider active reservations. + } + if reservation.Spec.Kind != v1alpha1.ComputeReservationSpecKindInstance { + continue // Not an instance reservation, skip it. 
+ } + host := reservation.Status.Host + instance := reservation.Spec.Instance + vcpusReserved[host] += instance.VCPUs.AsDec().UnscaledBig().Uint64() + memoryReserved[host] += instance.Memory.AsDec().UnscaledBig().Uint64() / 1000000 + diskReserved[host] += instance.Disk.AsDec().UnscaledBig().Uint64() / 1000000000 + } + traceLog.Debug( + "reserved resources", + "vcpus", vcpusReserved, + "memory", memoryReserved, + "disk", diskReserved, + ) hostsEncountered := map[string]struct{}{} for _, utilization := range hostUtilizations { hostsEncountered[utilization.ComputeHost] = struct{}{} - if int(utilization.TotalVCPUsAllocatable) < request.Spec.Data.Flavor.Data.VCPUs { + vCPUsAllocatable := uint64(utilization.TotalVCPUsAllocatable) + if reserved, ok := vcpusReserved[utilization.ComputeHost]; ok { + vCPUsAllocatable -= reserved + } + if vCPUsAllocatable < request.Spec.Data.Flavor.Data.VCPUs { slog.Debug( "Filtering host due to insufficient VCPU capacity", slog.String("host", utilization.ComputeHost), - slog.Int("requested_vcpus", request.Spec.Data.Flavor.Data.VCPUs), - slog.Int("available_vcpus", int(utilization.TotalVCPUsAllocatable)), + slog.Uint64("requested_vcpus", request.Spec.Data.Flavor.Data.VCPUs), + slog.Float64("available_vcpus", utilization.TotalVCPUsAllocatable), ) delete(result.Activations, utilization.ComputeHost) continue } - if int(utilization.TotalMemoryAllocatableMB) < request.Spec.Data.Flavor.Data.MemoryMB { + memoryAllocatableMB := uint64(utilization.TotalMemoryAllocatableMB) + if reserved, ok := memoryReserved[utilization.ComputeHost]; ok { + memoryAllocatableMB -= reserved + } + if memoryAllocatableMB < request.Spec.Data.Flavor.Data.MemoryMB { slog.Debug( "Filtering host due to insufficient RAM capacity", slog.String("host", utilization.ComputeHost), - slog.Int("requested_mb", request.Spec.Data.Flavor.Data.MemoryMB), - slog.Int("available_mb", int(utilization.TotalMemoryAllocatableMB)), + slog.Uint64("requested_mb", request.Spec.Data.Flavor.Data.MemoryMB), + slog.Float64("available_mb", utilization.TotalMemoryAllocatableMB), ) delete(result.Activations, utilization.ComputeHost) continue } - if int(utilization.TotalDiskAllocatableGB) < request.Spec.Data.Flavor.Data.RootGB { + diskAllocatableGB := uint64(utilization.TotalDiskAllocatableGB) + if reserved, ok := diskReserved[utilization.ComputeHost]; ok { + diskAllocatableGB -= reserved + } + if diskAllocatableGB < request.Spec.Data.Flavor.Data.RootGB { slog.Debug( "Filtering host due to insufficient Disk capacity", slog.String("host", utilization.ComputeHost), - slog.Int("requested_gb", request.Spec.Data.Flavor.Data.RootGB), - slog.Int("available_gb", int(utilization.TotalDiskAllocatableGB)), + slog.Uint64("requested_gb", request.Spec.Data.Flavor.Data.RootGB), + slog.Float64("available_gb", utilization.TotalDiskAllocatableGB), ) delete(result.Activations, utilization.ComputeHost) continue diff --git a/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity_test.go b/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity_test.go index 08eabb12b..0187675d2 100644 --- a/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity_test.go +++ b/internal/scheduler/nova/plugins/shared/filter_has_enough_capacity_test.go @@ -11,9 +11,33 @@ import ( "github.com/cobaltcore-dev/cortex/internal/db" "github.com/cobaltcore-dev/cortex/internal/extractor/plugins/shared" "github.com/cobaltcore-dev/cortex/internal/scheduler/nova/api" + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" testlibDB 
"github.com/cobaltcore-dev/cortex/testlib/db" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) +// Create a runtime scheme with all cortex CRDs registered. +func testScheme() *runtime.Scheme { + scheme, err := v1alpha1.SchemeBuilder.Build() + if err != nil { + panic(err) + } + return scheme +} + +// Create a fake kubernetes client with no runtime objects. +func testClient() client.Client { + var runtimeObjects []runtime.Object // None + return fake.NewClientBuilder(). + WithScheme(testScheme()). + WithRuntimeObjects(runtimeObjects...). + Build() +} + func TestFilterHasEnoughCapacity_Run(t *testing.T) { dbEnv := testlibDB.SetupDBEnv(t) testDB := db.DB{DbMap: dbEnv.DbMap} @@ -375,9 +399,11 @@ func TestFilterHasEnoughCapacity_Run(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { step := &FilterHasEnoughCapacity{} + step.Client = testClient() // Override the real client with our fake client if err := step.Init("", testDB, conf.NewRawOpts("{}")); err != nil { t.Fatalf("expected no error, got %v", err) } + // Override the real client with our fake client after Init() result, err := step.Run(slog.Default(), tt.request) if err != nil { t.Fatalf("expected no error, got %v", err) @@ -405,7 +431,7 @@ func TestFilterHasEnoughCapacity_Run(t *testing.T) { } } -func TestFilterHasEnoughCapacity_EdgeCases(t *testing.T) { +func TestFilterHasEnoughCapacity_WithReservations(t *testing.T) { dbEnv := testlibDB.SetupDBEnv(t) testDB := db.DB{DbMap: dbEnv.DbMap} defer testDB.Close() @@ -419,213 +445,167 @@ func TestFilterHasEnoughCapacity_EdgeCases(t *testing.T) { t.Fatalf("expected no error, got %v", err) } - // Insert edge case data - hostUtilizationsEdgeCases := []any{ - &shared.HostUtilization{ComputeHost: "host1", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalMemoryAllocatableMB: 1.5, TotalVCPUsAllocatable: 0.5, TotalDiskAllocatableGB: 0.5}, // Fractional capacity - &shared.HostUtilization{ComputeHost: "host2", RAMUtilizedPct: 0.0, VCPUsUtilizedPct: 0.0, DiskUtilizedPct: 0.0, TotalMemoryAllocatableMB: 1000000, TotalVCPUsAllocatable: 1000, TotalDiskAllocatableGB: 10000}, // Very large capacity - &shared.HostUtilization{ComputeHost: "host3", RAMUtilizedPct: 100.0, VCPUsUtilizedPct: 100.0, DiskUtilizedPct: 100.0, TotalMemoryAllocatableMB: -100, TotalVCPUsAllocatable: -10, TotalDiskAllocatableGB: -50}, // Negative capacity (edge case) + // Insert mock data into the feature_host_utilization table + hostUtilizations := []any{ + &shared.HostUtilization{ComputeHost: "host1", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalMemoryAllocatableMB: 32768, TotalVCPUsAllocatable: 16, TotalDiskAllocatableGB: 1000}, // High capacity host + &shared.HostUtilization{ComputeHost: "host2", RAMUtilizedPct: 80.0, VCPUsUtilizedPct: 70.0, DiskUtilizedPct: 60.0, TotalMemoryAllocatableMB: 16384, TotalVCPUsAllocatable: 8, TotalDiskAllocatableGB: 500}, // Medium capacity host } - if err := testDB.Insert(hostUtilizationsEdgeCases...); err != nil { + if err := testDB.Insert(hostUtilizations...); err != nil { t.Fatalf("expected no error, got %v", err) } - tests := []struct { - name string - flavor api.NovaFlavor - expectedHosts []string - filteredHosts []string - }{ + // Create active reservations that consume resources on hosts + reservations := []v1alpha1.ComputeReservation{ { - name: 
"Fractional capacity vs integer requirements", - flavor: api.NovaFlavor{ - VCPUs: 1, - MemoryMB: 1, - RootGB: 1, + ObjectMeta: metav1.ObjectMeta{ + Name: "reservation-host1-1", + Namespace: "test-namespace", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + DomainID: "test-domain", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + Memory: *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), // 4GB + VCPUs: *resource.NewQuantity(4, resource.DecimalSI), + Disk: *resource.NewQuantity(100*1024*1024*1024, resource.BinarySI), // 100GB + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseActive, + Host: "host1", }, - expectedHosts: []string{"host2"}, // Only host2 has enough capacity - filteredHosts: []string{"host1", "host3"}, // host1 has fractional capacity < 1, host3 has negative }, { - name: "Very large flavor vs very large capacity", - flavor: api.NovaFlavor{ - VCPUs: 500, - MemoryMB: 500000, - RootGB: 5000, + ObjectMeta: metav1.ObjectMeta{ + Name: "reservation-host2-1", + Namespace: "test-namespace", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + DomainID: "test-domain", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + Memory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8GB + VCPUs: *resource.NewQuantity(4, resource.DecimalSI), + Disk: *resource.NewQuantity(200*1024*1024*1024, resource.BinarySI), // 200GB + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseActive, + Host: "host2", }, - expectedHosts: []string{"host2"}, // Only host2 has very large capacity - filteredHosts: []string{"host1", "host3"}, }, { - name: "Zero requirements", - flavor: api.NovaFlavor{ - VCPUs: 0, - MemoryMB: 0, - RootGB: 0, + ObjectMeta: metav1.ObjectMeta{ + Name: "reservation-inactive", + Namespace: "test-namespace", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + DomainID: "test-domain", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + Memory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), // 16GB + VCPUs: *resource.NewQuantity(8, resource.DecimalSI), + Disk: *resource.NewQuantity(500*1024*1024*1024, resource.BinarySI), // 500GB + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseFailed, // Not active, should be ignored + Host: "host1", }, - expectedHosts: []string{"host1", "host2"}, // host3 with negative capacity gets filtered out - filteredHosts: []string{"host3"}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - request := api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: tt.flavor, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - }, - } - - step := &FilterHasEnoughCapacity{} - if err := step.Init("", testDB, conf.NewRawOpts("{}")); err != nil { - t.Fatalf("expected no error, got %v", err) - } - result, err := step.Run(slog.Default(), request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Check expected hosts are present - for _, host := range tt.expectedHosts { - if _, 
ok := result.Activations[host]; !ok { - t.Errorf("expected host %s to be present in activations", host) - } - } - - // Check filtered hosts are not present - for _, host := range tt.filteredHosts { - if _, ok := result.Activations[host]; ok { - t.Errorf("expected host %s to be filtered out", host) - } - } + // Create fake Kubernetes client with reservations + scheme := testScheme() + var runtimeObjects []runtime.Object + for i := range reservations { + runtimeObjects = append(runtimeObjects, &reservations[i]) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(runtimeObjects...). + Build() - // Check total count - if len(result.Activations) != len(tt.expectedHosts) { - t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) - } - }) + step := &FilterHasEnoughCapacity{} + step.Client = fakeClient // Override the real client with our fake client + if err := step.Init("", testDB, conf.NewRawOpts("{}")); err != nil { + t.Fatalf("expected no error, got %v", err) } -} -func TestFilterHasEnoughCapacity_ResourceTypes(t *testing.T) { - dbEnv := testlibDB.SetupDBEnv(t) - testDB := db.DB{DbMap: dbEnv.DbMap} - defer testDB.Close() - defer dbEnv.Close() + // Test case: Request that would fit on host1 without reservations, but not with reservations + request := api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + VCPUs: 14, // host1 has 16 total, 4 reserved = 12 available, so this should fail + MemoryMB: 16384, // host1 has 32768 total, 4000 reserved = 28768 available, so this should pass + RootGB: 500, // host1 has 1000 total, 100 reserved = 900 available, so this should pass + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + }, + } - // Create dependency tables - err := testDB.CreateTable( - testDB.AddTable(shared.HostUtilization{}), - ) + result, err := step.Run(slog.Default(), request) if err != nil { t.Fatalf("expected no error, got %v", err) } - // Insert specialized capacity data for individual resource testing - hostUtilizationsResourceTypes := []any{ - &shared.HostUtilization{ComputeHost: "cpu-rich", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalMemoryAllocatableMB: 8192, TotalVCPUsAllocatable: 64, TotalDiskAllocatableGB: 500}, // High CPU, medium RAM/disk - &shared.HostUtilization{ComputeHost: "ram-rich", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalMemoryAllocatableMB: 131072, TotalVCPUsAllocatable: 8, TotalDiskAllocatableGB: 500}, // High RAM, medium CPU/disk - &shared.HostUtilization{ComputeHost: "disk-rich", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalMemoryAllocatableMB: 8192, TotalVCPUsAllocatable: 8, TotalDiskAllocatableGB: 10000}, // High disk, medium CPU/RAM - &shared.HostUtilization{ComputeHost: "balanced", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalMemoryAllocatableMB: 16384, TotalVCPUsAllocatable: 16, TotalDiskAllocatableGB: 1000}, // Balanced resources + // Debug: Print the result to see what's happening + t.Logf("Result activations: %v", result.Activations) + + // host1 should be filtered out due to insufficient vCPUs after reservations (16 - 4 = 12 < 14) + if _, ok := result.Activations["host1"]; ok { + t.Error("expected host1 to be filtered out due to reservations consuming vCPUs") } - if err := 
testDB.Insert(hostUtilizationsResourceTypes...); err != nil { - t.Fatalf("expected no error, got %v", err) + + // host2 should be filtered out due to insufficient vCPUs (8 - 4 = 4 < 14) + if _, ok := result.Activations["host2"]; ok { + t.Error("expected host2 to be filtered out due to insufficient vCPUs") } - tests := []struct { - name string - flavor api.NovaFlavor - expectedHosts []string - description string - }{ - { - name: "CPU-intensive flavor", - flavor: api.NovaFlavor{ - VCPUs: 32, - MemoryMB: 4096, - RootGB: 100, - }, - expectedHosts: []string{"cpu-rich"}, // Only cpu-rich has 64 vCPUs, balanced has only 16 - description: "Should pass hosts with sufficient CPU", - }, - { - name: "RAM-intensive flavor", - flavor: api.NovaFlavor{ - VCPUs: 4, - MemoryMB: 65536, - RootGB: 100, - }, - expectedHosts: []string{"ram-rich"}, // Only ram-rich has 131072 MB, balanced has only 16384 MB - description: "Should pass hosts with sufficient RAM", - }, - { - name: "Disk-intensive flavor", - flavor: api.NovaFlavor{ - VCPUs: 4, - MemoryMB: 4096, - RootGB: 5000, + // Test case: Request that fits after accounting for reservations + request2 := api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + VCPUs: 10, // host1 has 16 - 4 = 12 available, so this should pass + MemoryMB: 20480, // host1 has 32768 - 4096 = 28672 available, so this should pass + RootGB: 800, // host1 has 1000 - 100 = 900 available, so this should pass + }, + }, }, - expectedHosts: []string{"disk-rich"}, - description: "Should pass hosts with sufficient disk", }, - { - name: "Multi-resource intensive flavor", - flavor: api.NovaFlavor{ - VCPUs: 16, - MemoryMB: 16384, - RootGB: 1000, - }, - expectedHosts: []string{"balanced"}, - description: "Should pass only balanced host with all resources", + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - request := api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: tt.flavor, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "cpu-rich"}, - {ComputeHost: "ram-rich"}, - {ComputeHost: "disk-rich"}, - {ComputeHost: "balanced"}, - }, - } - - step := &FilterHasEnoughCapacity{} - if err := step.Init("", testDB, conf.NewRawOpts("{}")); err != nil { - t.Fatalf("expected no error, got %v", err) - } - result, err := step.Run(slog.Default(), request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } + result2, err := step.Run(slog.Default(), request2) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } - // Check expected hosts are present - for _, host := range tt.expectedHosts { - if _, ok := result.Activations[host]; !ok { - t.Errorf("expected host %s to be present in activations for %s", host, tt.description) - } - } + // host1 should pass (16-4=12 vCPUs >= 10, 32768-4096=28672 MB >= 20480, 1000-100=900 GB >= 800) + if _, ok := result2.Activations["host1"]; !ok { + t.Error("expected host1 to be available after accounting for reservations") + } - // Check total count - if len(result.Activations) != len(tt.expectedHosts) { - t.Errorf("expected %d hosts, got %d for %s", len(tt.expectedHosts), len(result.Activations), tt.description) - } - }) + // host2 should be filtered out (8-4=4 vCPUs < 10) + if _, ok := result2.Activations["host2"]; ok { + 
t.Error("expected host2 to be filtered out due to insufficient vCPUs after reservations") } } diff --git a/internal/sync/openstack/nova/nova_types.go b/internal/sync/openstack/nova/nova_types.go index 3af483cab..d3badbaf1 100644 --- a/internal/sync/openstack/nova/nova_types.go +++ b/internal/sync/openstack/nova/nova_types.go @@ -204,13 +204,13 @@ func (Hypervisor) Indexes() []db.Index { return nil } // See: https://docs.openstack.org/api-ref/compute/#list-flavors type Flavor struct { ID string `json:"id" db:"id,primarykey"` - Disk int `json:"disk" db:"disk"` // in GB. - RAM int `json:"ram" db:"ram"` // in MB. + Disk uint64 `json:"disk" db:"disk"` // in GB. + RAM uint64 `json:"ram" db:"ram"` // in MB. Name string `json:"name" db:"name"` RxTxFactor float64 `json:"rxtx_factor" db:"rxtx_factor"` - VCPUs int `json:"vcpus" db:"vcpus"` + VCPUs uint64 `json:"vcpus" db:"vcpus"` IsPublic bool `json:"os-flavor-access:is_public" db:"is_public"` - Ephemeral int `json:"OS-FLV-EXT-DATA:ephemeral" db:"ephemeral"` + Ephemeral uint64 `json:"OS-FLV-EXT-DATA:ephemeral" db:"ephemeral"` Description string `json:"description" db:"description"` // JSON string of extra specifications used when scheduling the flavor. diff --git a/main.go b/main.go index 067a79a0c..59a23d59c 100644 --- a/main.go +++ b/main.go @@ -169,7 +169,7 @@ func main() { // uses this to check if the binary was built correctly) bininfo.HandleVersionArgument() - config := conf.NewConfig() + config := conf.NewConfig[*conf.SharedConfig]() // Set the configured logger. config.GetLoggingConfig().SetDefaultLogger() if err := config.Validate(); err != nil { diff --git a/plutono/provisioning/dashboards/cortex-reservations.json b/plutono/provisioning/dashboards/cortex-reservations.json new file mode 100644 index 000000000..bcf694dfb --- /dev/null +++ b/plutono/provisioning/dashboards/cortex-reservations.json @@ -0,0 +1,1017 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Plutono --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 7, + "links": [], + "panels": [ + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 28, + "panels": [ + { + "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 0, + "y": 9 + }, + "id": 22, + "options": { + "content": "# Readme\n\nReservations are generated from limes commitments that are for a concrete flavor (so called instance commitments). Commitments for bare resources are not processed by cortex as there is no way to tell where and how many vms would be placed. 
", + "mode": "markdown" + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Readme", + "type": "text" + } + ], + "title": "Readme", + "type": "row" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 24, + "panels": [], + "title": "Reservations", + "type": "row" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "decgbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Disk Reserved" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "basic" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 0, + "y": 2 + }, + "id": 12, + "options": { + "showHeader": false + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (host) (cortex_reservations_resources{status_phase=\"active\",resource=\"disk_gb\"})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Disk Reserved", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Disk Reserved", + "host": "Host" + } + } + } + ], + "type": "table" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "vCPUs" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "vCPUs Reserved" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "basic" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 6, + "y": 2 + }, + "id": 15, + "options": { + "showHeader": false + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (host) (cortex_reservations_resources{status_phase=\"active\",resource=\"vcpus\"})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "vCPUs Reserved", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "vCPUs Reserved", + "host": "Host" + } + } + } + ], + "type": "table" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Memory Reserved" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "basic" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 12, + "y": 2 + }, + "id": 16, + "options": { + "showHeader": false + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (host) 
(cortex_reservations_resources{status_phase=\"active\",resource=\"memory_mb\"})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Memory Reserved", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Memory Reserved", + "host": "Host" + } + } + } + ], + "type": "table" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "disk_gb" + }, + "properties": [ + { + "id": "unit", + "value": "decgbytes" + }, + { + "id": "displayName", + "value": "Disk" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory_mb" + }, + "properties": [ + { + "id": "unit", + "value": "decmbytes" + }, + { + "id": "displayName", + "value": "Memory" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "vcpus" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "displayName", + "value": "vCPUs" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 18, + "y": 2 + }, + "id": 11, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (resource) (cortex_reservations_resources{status_phase=\"active\"})", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{ resource }}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Total Resources Reserved", + "transformations": [], + "type": "stat" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + } + ] + }, + "unit": "decgbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Quantity" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "basic" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 0, + "y": 13 + }, + "id": 17, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (status_error) (cortex_reservations_resources{status_phase!=\"active\",resource=\"disk_gb\"})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Disk Not Reservable", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Quantity", + "host": "Host", + "status_error": "Reason" + } + } + } + ], + "type": "table" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + } + ] + }, + 
"unit": "vCPUs" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Quantity" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "basic" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 6, + "y": 13 + }, + "id": 18, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (status_error) (cortex_reservations_resources{status_phase!=\"active\",resource=\"vcpus\"})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "vCPUs Not Reservable", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Quantity", + "host": "Host", + "status_error": "Reason" + } + } + } + ], + "type": "table" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Quantity" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "basic" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 12, + "y": 13 + }, + "id": 19, + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (status_error) (cortex_reservations_resources{status_phase!=\"active\",resource=\"memory_mb\"})", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "vCPUs Not Reservable", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Quantity", + "host": "Host", + "status_error": "Reason" + } + } + } + ], + "type": "table" + }, + { + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "disk_gb" + }, + "properties": [ + { + "id": "unit", + "value": "decgbytes" + }, + { + "id": "displayName", + "value": "Disk" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory_mb" + }, + "properties": [ + { + "id": "unit", + "value": "decmbytes" + }, + { + "id": "displayName", + "value": "Memory" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "vcpus" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "displayName", + "value": "vCPUs" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 18, + "y": 13 + }, + "id": 20, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.37", + "targets": [ + { + "exemplar": true, + "expr": "max by (resource) (cortex_reservations_resources{status_phase!=\"active\"})", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{ resource }}", + 
"queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Total Resources Not Reservable", + "transformations": [], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 26, + "panels": [], + "title": "Operator", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 25 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.37", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "exemplar": true, + "expr": "histogram_quantile(0.95, sum(rate(controller_runtime_reconcile_time_seconds_bucket{controller=\"computereservation\"}[2m])) by (le))", + "interval": "", + "legendFormat": "Compute reservations controller", + "refId": "A" + } + ], + "thresholds": [ + { + "$$hashKey": "object:1853", + "colorMode": "critical", + "fill": true, + "line": false, + "op": "gt", + "value": 5, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Time to reconcile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:438", + "format": "s", + "label": null, + "logBase": 1, + "max": "6.5", + "min": null, + "show": true + }, + { + "$$hashKey": "object:439", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus-openstack", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 25 + }, + "hiddenSeries": false, + "id": 31, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.37", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "exemplar": true, + "expr": "rate(controller_runtime_reconcile_errors_total{controller=\"computereservation\"}[2m])", + "interval": "", + "legendFormat": "Errors compute reservations controller", + "refId": "A" + }, + { + "exemplar": true, + "expr": "rate(controller_runtime_reconcile_panics_total{controller=\"computereservation\"}[2m])", + "hide": false, + "interval": "", + "legendFormat": "Panics compute reservations controller", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": 
null, + "timeRegions": [], + "timeShift": null, + "title": "Reconcile issues rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:438", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:439", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "1m", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Cortex Reservations", + "uid": "cortex-reservations", + "version": 1 +} \ No newline at end of file diff --git a/plutono/provisioning/dashboards/cortex-reservations.license b/plutono/provisioning/dashboards/cortex-reservations.license new file mode 100644 index 000000000..2daded695 --- /dev/null +++ b/plutono/provisioning/dashboards/cortex-reservations.license @@ -0,0 +1,2 @@ +# Copyright 2025 SAP SE +# SPDX-License-Identifier: Apache-2.0 \ No newline at end of file diff --git a/reservations/LICENSE b/reservations/LICENSE new file mode 100644 index 000000000..06c1fb237 --- /dev/null +++ b/reservations/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 SAP SE or an SAP affiliate company and cobaltcore-dev contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/reservations/Makefile b/reservations/Makefile new file mode 100644 index 000000000..772466dcd --- /dev/null +++ b/reservations/Makefile @@ -0,0 +1,50 @@ +.PHONY: all +all: build + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 
+ +##@ Build + +.PHONY: build +build: manifests generate + +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen + +CONTROLLER_TOOLS_VERSION ?= v0.17.2 + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef \ No newline at end of file diff --git a/reservations/PROJECT b/reservations/PROJECT new file mode 100644 index 000000000..061ec6115 --- /dev/null +++ b/reservations/PROJECT @@ -0,0 +1,22 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +cliVersion: 4.7.1 +domain: cortex +layout: +- go.kubebuilder.io/v4 +plugins: + helm.kubebuilder.io/v1-alpha: {} +projectName: reservations +repo: github.com/cobaltcore-dev/cortex/reservations +resources: +- api: + crdVersion: v1 + controller: true + domain: cortex + group: reservations + kind: ComputeReservation + path: github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/reservations/api/LICENSE b/reservations/api/LICENSE new file mode 100644 index 000000000..06c1fb237 --- /dev/null +++ b/reservations/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2024 SAP SE or an SAP affiliate company and cobaltcore-dev contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
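The reservations/api package introduced next is a standalone Go module that carries only the CRD types, so other components can depend on it without pulling in the operator itself. Below is a minimal sketch of how a consumer could use it, assuming a reachable cluster with the CRD installed; the reservation name, project and domain IDs, and flavor are placeholder values and not part of this patch:

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	reservationsv1alpha1 "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1"
)

func main() {
	// Register the reservations.cortex/v1alpha1 types, as cmd/main.go does further down.
	scheme := runtime.NewScheme()
	if err := reservationsv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Build a client from the ambient kubeconfig or in-cluster config.
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// ComputeReservation is cluster-scoped, so no namespace is set.
	res := &reservationsv1alpha1.ComputeReservation{
		ObjectMeta: metav1.ObjectMeta{Name: "example-reservation"},
		Spec: reservationsv1alpha1.ComputeReservationSpec{
			Kind:      reservationsv1alpha1.ComputeReservationSpecKindInstance,
			ProjectID: "example-project-id",
			DomainID:  "example-domain-id",
			Instance: reservationsv1alpha1.ComputeReservationSpecInstance{
				Flavor: "example-flavor",
				Memory: resource.MustParse("2Gi"),
				VCPUs:  resource.MustParse("2"),
				Disk:   resource.MustParse("20Gi"),
			},
		},
	}
	if err := c.Create(context.Background(), res); err != nil {
		panic(err)
	}
	// The operator's controller is then expected to pick up the reservation and
	// report the allocated host and phase via the status subresource.
}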
diff --git a/reservations/api/go.mod b/reservations/api/go.mod new file mode 100644 index 000000000..89d5c5317 --- /dev/null +++ b/reservations/api/go.mod @@ -0,0 +1,27 @@ +module github.com/cobaltcore-dev/cortex/reservations/api + +go 1.24.0 + +require ( + k8s.io/apimachinery v0.33.4 + sigs.k8s.io/controller-runtime v0.21.0 +) + +require ( + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/text v0.23.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/reservations/api/go.sum b/reservations/api/go.sum new file mode 100644 index 000000000..6ac0e93cb --- /dev/null +++ b/reservations/api/go.sum @@ -0,0 +1,100 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/reservations/api/v1alpha1/computereservation_types.go 
b/reservations/api/v1alpha1/computereservation_types.go new file mode 100644 index 000000000..dfc8964a6 --- /dev/null +++ b/reservations/api/v1alpha1/computereservation_types.go @@ -0,0 +1,117 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// The kind of reservation. +type ComputeReservationSpecKind string + +const ( + // Reservation for a specific virtual machine configuration. + ComputeReservationSpecKindInstance ComputeReservationSpecKind = "instance" + // Reservation for a bare resource. + ComputeReservationSpecKindBareResource ComputeReservationSpecKind = "bare" +) + +// Specification for an instance reservation. +type ComputeReservationSpecInstance struct { + // The flavor name of the instance to reserve. + Flavor string `json:"flavor"` + // The memory to reserve (e.g., "1Gi", "512Mi"). + Memory resource.Quantity `json:"memory"` + // The number of vCPUs to reserve (e.g., "2", "500m"). + VCPUs resource.Quantity `json:"vCPUs"` + // The disk space to reserve (e.g., "10Gi", "500Mi"). + Disk resource.Quantity `json:"disk"` + // Extra specifications for the instance. + ExtraSpecs map[string]string `json:"extraSpecs,omitempty"` +} + +// Specification for a bare resource reservation +type ComputeReservationSpecBareResource struct { + // The amount of CPU to reserve (e.g., "2", "500m"). + CPU resource.Quantity `json:"cpu"` + // The amount of memory to reserve (e.g., "1Gi", "512Mi"). + Memory resource.Quantity `json:"memory"` + // The amount of disk space to reserve (e.g., "10Gi", "500Mi"). + Disk resource.Quantity `json:"disk"` +} + +// ComputeReservationSpec defines the desired state of ComputeReservation. +type ComputeReservationSpec struct { + Kind ComputeReservationSpecKind `json:"kind"` + + // The project ID to reserve for. + ProjectID string `json:"projectID"` + // The domain ID to reserve for. + DomainID string `json:"domainID"` + + // If reservation kind is instance, this field will contain metadata + // necessary to determine if the instance reservation can be fulfilled. + Instance ComputeReservationSpecInstance `json:"instance,omitempty"` + // If reservation kind is bare resource, this field will contain metadata + // necessary to determine if the bare resource reservation can be fulfilled. + BareResource ComputeReservationSpecBareResource `json:"bareResource,omitempty"` +} + +// The phase in which the reservation is. +type ComputeReservationStatusPhase string + +const ( + // The reservation has been placed and is considered during scheduling. + ComputeReservationStatusPhaseActive ComputeReservationStatusPhase = "active" + // The reservation could not be fulfilled. + ComputeReservationStatusPhaseFailed ComputeReservationStatusPhase = "failed" +) + +// ComputeReservationStatus defines the observed state of ComputeReservation. +type ComputeReservationStatus struct { + // The current phase of the reservation. + Phase ComputeReservationStatusPhase `json:"phase,omitempty"` + // An error explaining why the reservation is failed, if applicable. + Error string `json:"error,omitempty"` + // The name of the compute host that was allocated. 
+ Host string `json:"host"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=cres +// +kubebuilder:printcolumn:name="Host",type="string",JSONPath=".status.host" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error" + +// ComputeReservation is the Schema for the computereservations API +type ComputeReservation struct { + metav1.TypeMeta `json:",inline"` + + // metadata is a standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + // spec defines the desired state of ComputeReservation + // +required + Spec ComputeReservationSpec `json:"spec"` + + // status defines the observed state of ComputeReservation + // +optional + Status ComputeReservationStatus `json:"status,omitempty,omitzero"` +} + +// +kubebuilder:object:root=true + +// ComputeReservationList contains a list of ComputeReservation +type ComputeReservationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ComputeReservation `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ComputeReservation{}, &ComputeReservationList{}) +} diff --git a/reservations/api/v1alpha1/groupversion_info.go b/reservations/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..deb3f511f --- /dev/null +++ b/reservations/api/v1alpha1/groupversion_info.go @@ -0,0 +1,23 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +// Package v1alpha1 contains API Schema definitions for the reservations v1alpha1 API group. +// +kubebuilder:object:generate=true +// +groupName=reservations.cortex +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "reservations.cortex", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/reservations/api/v1alpha1/zz_generated.deepcopy.go b/reservations/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a9b172c2a --- /dev/null +++ b/reservations/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,146 @@ +//go:build !ignore_autogenerated + +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeReservation) DeepCopyInto(out *ComputeReservation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeReservation. +func (in *ComputeReservation) DeepCopy() *ComputeReservation { + if in == nil { + return nil + } + out := new(ComputeReservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ComputeReservation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeReservationList) DeepCopyInto(out *ComputeReservationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComputeReservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeReservationList. +func (in *ComputeReservationList) DeepCopy() *ComputeReservationList { + if in == nil { + return nil + } + out := new(ComputeReservationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComputeReservationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeReservationSpec) DeepCopyInto(out *ComputeReservationSpec) { + *out = *in + in.Instance.DeepCopyInto(&out.Instance) + in.BareResource.DeepCopyInto(&out.BareResource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeReservationSpec. +func (in *ComputeReservationSpec) DeepCopy() *ComputeReservationSpec { + if in == nil { + return nil + } + out := new(ComputeReservationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeReservationSpecBareResource) DeepCopyInto(out *ComputeReservationSpecBareResource) { + *out = *in + out.CPU = in.CPU.DeepCopy() + out.Memory = in.Memory.DeepCopy() + out.Disk = in.Disk.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeReservationSpecBareResource. +func (in *ComputeReservationSpecBareResource) DeepCopy() *ComputeReservationSpecBareResource { + if in == nil { + return nil + } + out := new(ComputeReservationSpecBareResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeReservationSpecInstance) DeepCopyInto(out *ComputeReservationSpecInstance) { + *out = *in + out.Memory = in.Memory.DeepCopy() + out.VCPUs = in.VCPUs.DeepCopy() + out.Disk = in.Disk.DeepCopy() + if in.ExtraSpecs != nil { + in, out := &in.ExtraSpecs, &out.ExtraSpecs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeReservationSpecInstance. +func (in *ComputeReservationSpecInstance) DeepCopy() *ComputeReservationSpecInstance { + if in == nil { + return nil + } + out := new(ComputeReservationSpecInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeReservationStatus) DeepCopyInto(out *ComputeReservationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeReservationStatus. +func (in *ComputeReservationStatus) DeepCopy() *ComputeReservationStatus { + if in == nil { + return nil + } + out := new(ComputeReservationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/reservations/cmd/main.go b/reservations/cmd/main.go new file mode 100644 index 000000000..d33c04140 --- /dev/null +++ b/reservations/cmd/main.go @@ -0,0 +1,246 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "crypto/tls" + "flag" + "os" + "path/filepath" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "github.com/cobaltcore-dev/cortex/internal/conf" + reservationsv1alpha1 "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" + "github.com/cobaltcore-dev/cortex/reservations/internal/controller" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(reservationsv1alpha1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +// nolint:gocyclo +func main() { + var metricsAddr string + var metricsCertPath, metricsCertName, metricsCertKey string + var webhookCertPath, webhookCertName, webhookCertKey string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + var tlsOpts []func(*tls.Config) + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. 
Use --metrics-secure=false to use HTTP instead.") + flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.") + flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.") + flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.") + flag.StringVar(&metricsCertPath, "metrics-cert-path", "", + "The directory that contains the metrics server certificate.") + flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.") + flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + // Create watchers for metrics and webhooks certificates + var metricsCertWatcher, webhookCertWatcher *certwatcher.CertWatcher + + // Initial webhook TLS options + webhookTLSOpts := tlsOpts + + if len(webhookCertPath) > 0 { + setupLog.Info("Initializing webhook certificate watcher using provided certificates", + "webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey) + + var err error + webhookCertWatcher, err = certwatcher.New( + filepath.Join(webhookCertPath, webhookCertName), + filepath.Join(webhookCertPath, webhookCertKey), + ) + if err != nil { + setupLog.Error(err, "Failed to initialize webhook certificate watcher") + os.Exit(1) + } + + webhookTLSOpts = append(webhookTLSOpts, func(config *tls.Config) { + config.GetCertificate = webhookCertWatcher.GetCertificate + }) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: webhookTLSOpts, + }) + + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. 
More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + // If the certificate is not specified, controller-runtime will automatically + // generate self-signed certificates for the metrics server. While convenient for development and testing, + // this setup is not recommended for production. + // + // If you enable certManager, uncomment the following lines: + // - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates + // managed by cert-manager for the metrics server. + // - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification. + if len(metricsCertPath) > 0 { + setupLog.Info("Initializing metrics certificate watcher using provided certificates", + "metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey) + + var err error + metricsCertWatcher, err = certwatcher.New( + filepath.Join(metricsCertPath, metricsCertName), + filepath.Join(metricsCertPath, metricsCertKey), + ) + if err != nil { + setupLog.Error(err, "to initialize metrics certificate watcher", "error", err) + os.Exit(1) + } + + metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, func(config *tls.Config) { + config.GetCertificate = metricsCertWatcher.GetCertificate + }) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "6fb26448.cortex", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err := (&controller.ComputeReservationReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Conf: conf.NewConfig[controller.Config](), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ComputeReservation") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + if metricsCertWatcher != nil { + setupLog.Info("Adding metrics certificate watcher to manager") + if err := mgr.Add(metricsCertWatcher); err != nil { + setupLog.Error(err, "unable to add metrics certificate watcher to manager") + os.Exit(1) + } + } + + if webhookCertWatcher != nil { + setupLog.Info("Adding webhook certificate watcher to manager") + if err := mgr.Add(webhookCertWatcher); err != nil { + setupLog.Error(err, "unable to add webhook certificate watcher to manager") + os.Exit(1) + } + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting monitor") + monitor := controller.Monitor{Client: mgr.GetClient()} + monitor.Init() + metrics.Registry.MustRegister(&monitor) + + setupLog.Info("starting syncer") + ctx := context.Background() + syncer := controller.NewSyncer(mgr.GetClient()) + syncer.Init(ctx) + go syncer.Run(ctx) + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/reservations/config/crd/bases/reservations.cortex_computereservations.yaml b/reservations/config/crd/bases/reservations.cortex_computereservations.yaml new file mode 100644 index 000000000..5acff1e4c --- /dev/null +++ b/reservations/config/crd/bases/reservations.cortex_computereservations.yaml @@ -0,0 +1,163 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: computereservations.reservations.cortex +spec: + group: reservations.cortex + names: + kind: ComputeReservation + listKind: ComputeReservationList + plural: computereservations + shortNames: + - cres + singular: computereservation + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.host + name: Host + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ComputeReservation is the Schema for the computereservations + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec defines the desired state of ComputeReservation + properties: + bareResource: + description: |- + If reservation kind is bare resource, this field will contain metadata + necessary to determine if the bare resource reservation can be fulfilled. + properties: + cpu: + anyOf: + - type: integer + - type: string + description: The amount of CPU to reserve (e.g., "2", "500m"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + disk: + anyOf: + - type: integer + - type: string + description: The amount of disk space to reserve (e.g., "10Gi", + "500Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + description: The amount of memory to reserve (e.g., "1Gi", "512Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - cpu + - disk + - memory + type: object + domainID: + description: The domain ID to reserve for. + type: string + instance: + description: |- + If reservation kind is instance, this field will contain metadata + necessary to determine if the instance reservation can be fulfilled. + properties: + disk: + anyOf: + - type: integer + - type: string + description: The disk space to reserve (e.g., "10Gi", "500Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + extraSpecs: + additionalProperties: + type: string + description: Extra specifications for the instance. + type: object + flavor: + description: The flavor name of the instance to reserve. + type: string + memory: + anyOf: + - type: integer + - type: string + description: The memory to reserve (e.g., "1Gi", "512Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + vCPUs: + anyOf: + - type: integer + - type: string + description: The number of vCPUs to reserve (e.g., "2", "500m"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - disk + - flavor + - memory + - vCPUs + type: object + kind: + description: The kind of reservation. + type: string + projectID: + description: The project ID to reserve for. + type: string + required: + - domainID + - kind + - projectID + type: object + status: + description: status defines the observed state of ComputeReservation + properties: + error: + description: An error explaining why the reservation is failed, if + applicable. + type: string + host: + description: The name of the compute host that was allocated. + type: string + phase: + description: The current phase of the reservation. 
+ type: string + required: + - host + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/reservations/config/crd/kustomization.yaml b/reservations/config/crd/kustomization.yaml new file mode 100644 index 000000000..0d4a61a56 --- /dev/null +++ b/reservations/config/crd/kustomization.yaml @@ -0,0 +1,16 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/reservations.cortex_computereservations.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. +#configurations: +#- kustomizeconfig.yaml diff --git a/reservations/config/crd/kustomizeconfig.yaml b/reservations/config/crd/kustomizeconfig.yaml new file mode 100644 index 000000000..ec5c150a9 --- /dev/null +++ b/reservations/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/reservations/config/default/cert_metrics_manager_patch.yaml b/reservations/config/default/cert_metrics_manager_patch.yaml new file mode 100644 index 000000000..d97501553 --- /dev/null +++ b/reservations/config/default/cert_metrics_manager_patch.yaml @@ -0,0 +1,30 @@ +# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs. + +# Add the volumeMount for the metrics-server certs +- op: add + path: /spec/template/spec/containers/0/volumeMounts/- + value: + mountPath: /tmp/k8s-metrics-server/metrics-certs + name: metrics-certs + readOnly: true + +# Add the --metrics-cert-path argument for the metrics server +- op: add + path: /spec/template/spec/containers/0/args/- + value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs + +# Add the metrics-server certs volume configuration +- op: add + path: /spec/template/spec/volumes/- + value: + name: metrics-certs + secret: + secretName: metrics-server-cert + optional: false + items: + - key: ca.crt + path: ca.crt + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key diff --git a/reservations/config/default/kustomization.yaml b/reservations/config/default/kustomization.yaml new file mode 100644 index 000000000..032f5ef46 --- /dev/null +++ b/reservations/config/default/kustomization.yaml @@ -0,0 +1,234 @@ +# Adds namespace to all resources. +namespace: cortex-reservations + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. 
+namePrefix: cortex-reservations- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# Uncomment the patches line if you enable Metrics and CertManager +# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line. +# This patch will protect the metrics with certManager self-signed certs. +#- path: cert_metrics_manager_patch.yaml +# target: +# kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml +# target: +# kind: Deployment + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Uncomment the following block to enable certificates for metrics +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.name +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' +# index: 0 +# create: true + +# - source: +# kind: Service +# version: v1 +# name: controller-manager-metrics-service +# fieldPath: metadata.namespace +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: metrics-certs +# fieldPaths: +# - spec.dnsNames.0 +# - spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true +# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor +# kind: ServiceMonitor +# group: monitoring.coreos.com +# version: v1 +# name: controller-manager-metrics-monitor +# fieldPaths: +# - spec.endpoints.0.tlsConfig.serverName +# options: +# delimiter: '.' 
+# index: 1 +# create: true + +# - source: # Uncomment the following block if you have any webhook +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # Name of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # Namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # This name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting ) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true + +# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion) +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.namespace # Namespace of the certificate CR +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. +# +kubebuilder:scaffold:crdkustomizecainjectionns +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert +# fieldPath: .metadata.name +# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD. 
+# +kubebuilder:scaffold:crdkustomizecainjectionname diff --git a/reservations/config/default/manager_metrics_patch.yaml b/reservations/config/default/manager_metrics_patch.yaml new file mode 100644 index 000000000..2aaef6536 --- /dev/null +++ b/reservations/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/reservations/config/default/metrics_service.yaml b/reservations/config/default/metrics_service.yaml new file mode 100644 index 000000000..05c771deb --- /dev/null +++ b/reservations/config/default/metrics_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager + app.kubernetes.io/name: reservations diff --git a/reservations/config/manager/kustomization.yaml b/reservations/config/manager/kustomization.yaml new file mode 100644 index 000000000..2b4623f7b --- /dev/null +++ b/reservations/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: example.com/reservations + newTag: v0.0.1 diff --git a/reservations/config/manager/manager.yaml b/reservations/config/manager/manager.yaml new file mode 100644 index 000000000..c7b54b19d --- /dev/null +++ b/reservations/config/manager/manager.yaml @@ -0,0 +1,77 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + spec: + securityContext: + # Projects are configured by default to adhere to the "restricted" Pod Security Standards. + # This ensures that deployments meet the highest security requirements for Kubernetes. 
+ # For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + ports: [] + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + volumeMounts: [] + volumes: [] + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/reservations/config/network-policy/allow-metrics-traffic.yaml b/reservations/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 000000000..8dcdba1fc --- /dev/null +++ b/reservations/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,27 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gather data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/reservations/config/network-policy/kustomization.yaml b/reservations/config/network-policy/kustomization.yaml new file mode 100644 index 000000000..ec0fb5e57 --- /dev/null +++ b/reservations/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/reservations/config/prometheus/kustomization.yaml b/reservations/config/prometheus/kustomization.yaml new file mode 100644 index 000000000..fdc5481b1 --- /dev/null +++ b/reservations/config/prometheus/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- monitor.yaml + +# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus +# to securely reference certificates created and managed by cert-manager. +# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml +# to mount the "metrics-server-cert" secret in the Manager Deployment. 
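Note that the allow-metrics-traffic NetworkPolicy above only admits traffic to the metrics port from namespaces carrying the `metrics: enabled` label. A minimal sketch of such a namespace (the name is a placeholder for wherever the Prometheus scraper runs):

apiVersion: v1
kind: Namespace
metadata:
  name: monitoring            # placeholder; label the namespace that hosts your scraper
  labels:
    metrics: enabled          # matches the namespaceSelector in allow-metrics-traffic.yaml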
+#patches: +# - path: monitor_tls_patch.yaml +# target: +# kind: ServiceMonitor diff --git a/reservations/config/prometheus/monitor.yaml b/reservations/config/prometheus/monitor.yaml new file mode 100644 index 000000000..b28ad9b77 --- /dev/null +++ b/reservations/config/prometheus/monitor.yaml @@ -0,0 +1,27 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification, exposing the system to potential man-in-the-middle attacks. + # For production environments, it is recommended to use cert-manager for automatic TLS certificate management. + # To apply this configuration, enable cert-manager and use the patch located at config/prometheus/servicemonitor_tls_patch.yaml, + # which securely references the certificate from the 'metrics-server-cert' secret. + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: reservations diff --git a/reservations/config/prometheus/monitor_tls_patch.yaml b/reservations/config/prometheus/monitor_tls_patch.yaml new file mode 100644 index 000000000..5bf84ce0d --- /dev/null +++ b/reservations/config/prometheus/monitor_tls_patch.yaml @@ -0,0 +1,19 @@ +# Patch for Prometheus ServiceMonitor to enable secure TLS configuration +# using certificates managed by cert-manager +- op: replace + path: /spec/endpoints/0/tlsConfig + value: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key diff --git a/reservations/config/rbac/computereservation_admin_role.yaml b/reservations/config/rbac/computereservation_admin_role.yaml new file mode 100644 index 000000000..199863489 --- /dev/null +++ b/reservations/config/rbac/computereservation_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project reservations itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over reservations.cortex. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: computereservation-admin-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - '*' +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get diff --git a/reservations/config/rbac/computereservation_editor_role.yaml b/reservations/config/rbac/computereservation_editor_role.yaml new file mode 100644 index 000000000..99f1e4da7 --- /dev/null +++ b/reservations/config/rbac/computereservation_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project reservations itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the reservations.cortex. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: computereservation-editor-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get diff --git a/reservations/config/rbac/computereservation_viewer_role.yaml b/reservations/config/rbac/computereservation_viewer_role.yaml new file mode 100644 index 000000000..e87bbbe51 --- /dev/null +++ b/reservations/config/rbac/computereservation_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project reservations itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to reservations.cortex resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: computereservation-viewer-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - get + - list + - watch +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get diff --git a/reservations/config/rbac/kustomization.yaml b/reservations/config/rbac/kustomization.yaml new file mode 100644 index 000000000..b8dff3439 --- /dev/null +++ b/reservations/config/rbac/kustomization.yaml @@ -0,0 +1,28 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. 
Comment the following +# permissions if you want to disable this protection. +# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the reservations itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- computereservation_admin_role.yaml +- computereservation_editor_role.yaml +- computereservation_viewer_role.yaml + diff --git a/reservations/config/rbac/leader_election_role.yaml b/reservations/config/rbac/leader_election_role.yaml new file mode 100644 index 000000000..3fbda71ff --- /dev/null +++ b/reservations/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/reservations/config/rbac/leader_election_role_binding.yaml b/reservations/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 000000000..8708c2651 --- /dev/null +++ b/reservations/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/reservations/config/rbac/metrics_auth_role.yaml b/reservations/config/rbac/metrics_auth_role.yaml new file mode 100644 index 000000000..32d2e4ec6 --- /dev/null +++ b/reservations/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/reservations/config/rbac/metrics_auth_role_binding.yaml b/reservations/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 000000000..e775d67ff --- /dev/null +++ b/reservations/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/reservations/config/rbac/metrics_reader_role.yaml b/reservations/config/rbac/metrics_reader_role.yaml new file mode 100644 index 000000000..51a75db47 --- /dev/null +++ b/reservations/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- 
nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/reservations/config/rbac/role.yaml b/reservations/config/rbac/role.yaml new file mode 100644 index 000000000..d6cef6ef9 --- /dev/null +++ b/reservations/config/rbac/role.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - reservations.cortex + resources: + - computereservations/finalizers + verbs: + - update +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get + - patch + - update diff --git a/reservations/config/rbac/role_binding.yaml b/reservations/config/rbac/role_binding.yaml new file mode 100644 index 000000000..699f488a3 --- /dev/null +++ b/reservations/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/reservations/config/rbac/service_account.yaml b/reservations/config/rbac/service_account.yaml new file mode 100644 index 000000000..56d090386 --- /dev/null +++ b/reservations/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: reservations + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/reservations/dist/chart/.helmignore b/reservations/dist/chart/.helmignore new file mode 100644 index 000000000..7d92f7fb4 --- /dev/null +++ b/reservations/dist/chart/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building Helm packages. +# Operating system files +.DS_Store + +# Version control directories +.git/ +.gitignore +.bzr/ +.hg/ +.hgignore +.svn/ + +# Backup and temporary files +*.swp +*.tmp +*.bak +*.orig +*~ + +# IDE and editor-related files +.idea/ +.vscode/ + +# Helm chart artifacts +dist/chart/*.tgz diff --git a/reservations/dist/chart/Chart.lock b/reservations/dist/chart/Chart.lock new file mode 100644 index 000000000..db4c5823b --- /dev/null +++ b/reservations/dist/chart/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: owner-info + repository: oci://ghcr.io/sapcc/helm-charts + version: 1.0.0 +digest: sha256:7643f231cc4ebda347fd12ec62fe4445c280e2b71d27eec555f3025290f5038f +generated: "2025-08-26T10:55:05.888651+02:00" diff --git a/reservations/dist/chart/Chart.yaml b/reservations/dist/chart/Chart.yaml new file mode 100644 index 000000000..3c75d8fe4 --- /dev/null +++ b/reservations/dist/chart/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: cortex-reservations +description: A Helm chart to distribute the cortex reservations operator. +type: application +version: 0.1.0 +appVersion: "latest" +icon: "https://example.com/icon.png" +dependencies: + # Owner info adds a configmap to the kubernetes cluster with information on + # the service owner. This makes it easier to find out who to contact in case + # of issues. 
See: https://github.com/sapcc/helm-charts/pkgs/container/helm-charts%2Fowner-info + - name: owner-info + repository: oci://ghcr.io/sapcc/helm-charts + version: 1.0.0 diff --git a/reservations/dist/chart/charts/owner-info-1.0.0.tgz b/reservations/dist/chart/charts/owner-info-1.0.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..2032ead97387e7374750a71b0ffc726645bc5b7e GIT binary patch literal 2139 zcmV-h2&DHPiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI=abKAHP_cQ;B9jP<6Rq;bUN15v7CXH{JwC5({`qGC?Is(b1 zh%*Q<04Rl(?tkw9ASH^j>$>MlE*ay+qy_93_T#rpvEnFw$}N{~Mh>e|9uEpH9!BXVd8%l8IUYftP}3ba6>a&^ghN2(Ag`3hXu#i%LUc9O+oO z^MMEOS_4&3AmJLcfEb}tm;y2MQY?|D$del48idYKIsz*2I4Q6zhPP}*odi)3d0*Oi z-@xj`|B|o@)o+jh?uq}&B%1c)|Jmdz{y)na!XQUh_%`JHO0oeImC`0g35QqL*Kbe$ zaSA_O{Rl2K#FQ^!m7_!x$rLl>?sn6i}*c-y2Ak^j! zlxkpd1K}WS)*%cSRT@MFCi<0mAdEqYqGSlGTtIua$2fr3nKy)@Vw#$*&549?$dQ$> zqB;kvRD}v8E{IHxDu#wVK86gDK_<~^MtQ=j6itpQ2(O(m-q5_)q5?*5Fz`BB%;5b8 z&uFHqvJ_JL3n{A7>L&4HMJ1+?2?|k+8xW+D)J+Uoh;GMy{I9Vn8PTZ1#MbzNl#f#k?#ch(olhsd{6C#M;lIzb)@vVV1}BuK_z8k{ z76TmCgpJ8i`00}9B!|Jja;;1C{VWX4FI?zT@PGNm12EJwLKzGiO2N**f_<765mKdW zCX$8-kZ!#hSdxYy@^t%ri>SSX+mrM?@&6t9fAT;4@edD?b&qII)DGlb5=K*7KS-i& zExSwTmPeB{?-Xoo*d$dD)u z{!R)6D{4Bm0f|HjPGVL= zRr*@^?vxYHR3mMv+k$WT?X2N%yMy%_K2|~_1g@uTv@^nbtzvy#vR=czG2n=9uZ3pT zZBOtcL~z28w`YS6FgQJR2llYSdJR7d%3&~u!EJ|N1Da}vhdj+C8Z`iVwf|85gTOcx zzwedyw{2lJmwn*2cgW-W{=*z=2>(RjXxzizXj^ic2Ct< z;u;7Pl_nO;tT4`nxG~7o;up)nHIpL#853lPDV8vLi)?8hxknU4N>ptAqcpet2P@3@6Crof0H>v%;8KHMDBlJe2mt=`V&qgD7!%ebO2=kp*bK7^Dysky* zGh8BTL_47Y2D=G>Q?C})u|&@aN%`V5fY+Jr`I`b{s1aW8graUrQHx3*u+XL+nZql} z7%WhG+xf)Stgi32MFtW^R33QVa0ovsvOv#!k*53F^rxm;IM(gdb-tV9&mp4>XP!5o z&&|8nmDyxg9}Y&WCUHwFg`!%>dIln)voO5DvP2eO<)`RRf`o}G4HPNE#6J2a)7zul z!ZWn?+7UihkW^Bdn%B>ZM<8%$+ zI!8VRlDfH6XN28?k3)lX2A>BO+Yz`8G#=YVXcmSGs`Dxi5>bSTlt~iqcB)WI#IPVr zqYPE8G!`L#B1OsYxCaca34Qo0;RqK3o7YS72`(#hydaz(7Q}{_d+nCB_J`&l&cN3> zLM9j!D+8P|iY#>)g_I={*&PDy`JKWtLd+o~#@56AVsJa*FB1e_y~ zBBM*R6fWMpc?(M_m;>EoyV>4{D{3b47OFLvBeM0J7oe)x6bMB#npg_17+Iah&`fd1 zZBxf|$VhUnu5rYYT z3BIt7EOyb*c55fG&^e@3Vxn1Xc~RcJo;qtbS&VX+2W`p^Tvzsf;Hp))-!FVFq%4iZQ~{~9D2S#BVXTG{WhP|d;%8TlEOno9qp=+)6{Xx*Ciqm=QfZ>?*7T2Jd~J*}^Q R{U-nb|Nj_?q>KP4004X22~_|9 literal 0 HcmV?d00001 diff --git a/reservations/dist/chart/templates/_helpers.tpl b/reservations/dist/chart/templates/_helpers.tpl new file mode 100644 index 000000000..8ab463a11 --- /dev/null +++ b/reservations/dist/chart/templates/_helpers.tpl @@ -0,0 +1,50 @@ +{{- define "chart.name" -}} +{{- if .Chart }} + {{- if .Chart.Name }} + {{- .Chart.Name | trunc 63 | trimSuffix "-" }} + {{- else if .Values.nameOverride }} + {{ .Values.nameOverride | trunc 63 | trimSuffix "-" }} + {{- else }} + reservations + {{- end }} +{{- else }} + reservations +{{- end }} +{{- end }} + + +{{- define "chart.labels" -}} +{{- if .Chart.AppVersion -}} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Chart.Version }} +helm.sh/chart: {{ .Chart.Version | quote }} +{{- end }} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + + +{{- define "chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + + +{{- define "chart.hasMutatingWebhooks" -}} +{{- $hasMutating := false }} +{{- range . 
}} + {{- if eq .type "mutating" }} + $hasMutating = true }}{{- end }} +{{- end }} +{{ $hasMutating }}}}{{- end }} + + +{{- define "chart.hasValidatingWebhooks" -}} +{{- $hasValidating := false }} +{{- range . }} + {{- if eq .type "validating" }} + $hasValidating = true }}{{- end }} +{{- end }} +{{ $hasValidating }}}}{{- end }} diff --git a/reservations/dist/chart/templates/certmanager/certificate.yaml b/reservations/dist/chart/templates/certmanager/certificate.yaml new file mode 100644 index 000000000..7a9e573fa --- /dev/null +++ b/reservations/dist/chart/templates/certmanager/certificate.yaml @@ -0,0 +1,36 @@ +{{- if .Values.certmanager.enable }} +# Self-signed Issuer +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: selfsigned-issuer + namespace: {{ .Release.Namespace }} +spec: + selfSigned: {} +{{- if .Values.metrics.enable }} +--- +# Certificate for the metrics +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + annotations: + {{- if .Values.crd.keep }} + "helm.sh/resource-policy": keep + {{- end }} + labels: + {{- include "chart.labels" . | nindent 4 }} + name: metrics-certs + namespace: {{ .Release.Namespace }} +spec: + dnsNames: + - reservations.{{ .Release.Namespace }}.svc + - reservations.{{ .Release.Namespace }}.svc.cluster.local + - reservations-metrics-service.{{ .Release.Namespace }}.svc + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: metrics-server-cert +{{- end }} +{{- end }} diff --git a/reservations/dist/chart/templates/crd/reservations.cortex_computereservations.yaml b/reservations/dist/chart/templates/crd/reservations.cortex_computereservations.yaml new file mode 100644 index 000000000..b4384dad1 --- /dev/null +++ b/reservations/dist/chart/templates/crd/reservations.cortex_computereservations.yaml @@ -0,0 +1,170 @@ +{{- if .Values.crd.enable }} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + annotations: + {{- if .Values.crd.keep }} + "helm.sh/resource-policy": keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.18.0 + name: computereservations.reservations.cortex +spec: + group: reservations.cortex + names: + kind: ComputeReservation + listKind: ComputeReservationList + plural: computereservations + shortNames: + - cres + singular: computereservation + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.host + name: Host + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ComputeReservation is the Schema for the computereservations + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec defines the desired state of ComputeReservation + properties: + bareResource: + description: |- + If reservation kind is bare resource, this field will contain metadata + necessary to determine if the bare resource reservation can be fulfilled. + properties: + cpu: + anyOf: + - type: integer + - type: string + description: The amount of CPU to reserve (e.g., "2", "500m"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + disk: + anyOf: + - type: integer + - type: string + description: The amount of disk space to reserve (e.g., "10Gi", + "500Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + description: The amount of memory to reserve (e.g., "1Gi", "512Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - cpu + - disk + - memory + type: object + domainID: + description: The domain ID to reserve for. + type: string + instance: + description: |- + If reservation kind is instance, this field will contain metadata + necessary to determine if the instance reservation can be fulfilled. + properties: + disk: + anyOf: + - type: integer + - type: string + description: The disk space to reserve (e.g., "10Gi", "500Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + extraSpecs: + additionalProperties: + type: string + description: Extra specifications for the instance. + type: object + flavor: + description: The flavor name of the instance to reserve. + type: string + memory: + anyOf: + - type: integer + - type: string + description: The memory to reserve (e.g., "1Gi", "512Mi"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + vCPUs: + anyOf: + - type: integer + - type: string + description: The number of vCPUs to reserve (e.g., "2", "500m"). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - disk + - flavor + - memory + - vCPUs + type: object + kind: + description: The kind of reservation. + type: string + projectID: + description: The project ID to reserve for. + type: string + required: + - domainID + - kind + - projectID + type: object + status: + description: status defines the observed state of ComputeReservation + properties: + error: + description: An error explaining why the reservation is failed, if + applicable. + type: string + host: + description: The name of the compute host that was allocated. + type: string + phase: + description: The current phase of the reservation. 
+ type: string + required: + - host + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +{{- end -}} diff --git a/reservations/dist/chart/templates/manager/manager.yaml b/reservations/dist/chart/templates/manager/manager.yaml new file mode 100644 index 000000000..418ed7c72 --- /dev/null +++ b/reservations/dist/chart/templates/manager/manager.yaml @@ -0,0 +1,107 @@ +# This file is safe from kubebuilder edit --plugins=helm/v1-alpha +# If you want to re-generate, add the --force flag. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reservations-controller-manager + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . | nindent 4 }} + control-plane: controller-manager +spec: + replicas: {{ .Values.controllerManager.replicas }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + {{- include "chart.labels" . | nindent 8 }} + control-plane: controller-manager + {{- if and .Values.controllerManager.pod .Values.controllerManager.pod.labels }} + {{- range $key, $value := .Values.controllerManager.pod.labels }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + containers: + - name: manager + args: + {{- range .Values.controllerManager.container.args }} + - {{ . }} + {{- end }} + command: + - /manager + image: {{ .Values.controllerManager.container.image.repository }}:{{ .Values.controllerManager.container.image.tag | default .Chart.AppVersion }} + {{- if .Values.controllerManager.container.env }} + env: + {{- range $key, $value := .Values.controllerManager.container.env }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + {{- end }} + livenessProbe: + {{- toYaml .Values.controllerManager.container.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.controllerManager.container.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.controllerManager.container.resources | nindent 12 }} + securityContext: + {{- toYaml .Values.controllerManager.container.securityContext | nindent 12 }} + volumeMounts: + - name: reservations-controller-manager-config-volume + mountPath: /etc/config + - name: reservations-controller-manager-secrets-volume + mountPath: /etc/secrets + readOnly: true + {{- if and .Values.metrics.enable .Values.certmanager.enable }} + - name: metrics-certs + mountPath: /tmp/k8s-metrics-server/metrics-certs + readOnly: true + {{- end }} + securityContext: + {{- toYaml .Values.controllerManager.securityContext | nindent 8 }} + serviceAccountName: {{ .Values.controllerManager.serviceAccountName }} + terminationGracePeriodSeconds: {{ .Values.controllerManager.terminationGracePeriodSeconds }} + volumes: + # Custom values to configure the controller-manager. 
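To make the ComputeReservation schema above concrete: a cluster-scoped object that validates against this CRD could look roughly as follows (a sketch; the flavor name, the IDs, and the assumption that spec.kind is spelled "instance" are illustrative, since the CRD only constrains kind to a string):

apiVersion: reservations.cortex/v1alpha1
kind: ComputeReservation
metadata:
  name: example-reservation          # placeholder name; the resource is cluster-scoped, so no namespace
spec:
  kind: instance                     # assumed spelling for flavor-based reservations
  projectID: 0123456789abcdef        # placeholder OpenStack project ID
  domainID: fedcba9876543210         # placeholder OpenStack domain ID
  instance:
    flavor: m1.large                 # placeholder flavor name
    vCPUs: "4"
    memory: 8Gi
    disk: 64Gi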
+ - name: reservations-controller-manager-config-volume + configMap: + name: reservations-controller-manager-config + - name: reservations-controller-manager-secrets-volume + secret: + secretName: reservations-controller-manager-secrets + {{- if and .Values.metrics.enable .Values.certmanager.enable }} + - name: metrics-certs + secret: + secretName: metrics-server-cert + {{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: reservations-controller-manager-config +data: + conf.json: |- + {{- if .Values.reservations.conf }} + {{ toJson .Values.reservations.conf }} + {{- else }} + {} + {{- end }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: reservations-controller-manager-secrets +type: Opaque +data: + secrets.json: |- + {{- if .Values.reservations.secrets }} + {{ toJson .Values.reservations.secrets | b64enc }} + {{- else }} + {{ "{}" | b64enc }} + {{- end }} \ No newline at end of file diff --git a/reservations/dist/chart/templates/metrics/metrics-service.yaml b/reservations/dist/chart/templates/metrics/metrics-service.yaml new file mode 100644 index 000000000..3ad645cf8 --- /dev/null +++ b/reservations/dist/chart/templates/metrics/metrics-service.yaml @@ -0,0 +1,18 @@ +{{- if .Values.metrics.enable }} +apiVersion: v1 +kind: Service +metadata: + name: reservations-controller-manager-metrics-service + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . | nindent 4 }} + control-plane: controller-manager +spec: + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + name: https + selector: + control-plane: controller-manager +{{- end }} diff --git a/reservations/dist/chart/templates/network-policy/allow-metrics-traffic.yaml b/reservations/dist/chart/templates/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 000000000..fa55330a8 --- /dev/null +++ b/reservations/dist/chart/templates/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,28 @@ +{{- if .Values.networkPolicy.enable }} +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gather data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: allow-metrics-traffic + namespace: {{ .Release.Namespace }} +spec: + podSelector: + matchLabels: + control-plane: controller-manager + app.kubernetes.io/name: reservations + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP +{{- end -}} diff --git a/reservations/dist/chart/templates/prometheus/monitor.yaml b/reservations/dist/chart/templates/prometheus/monitor.yaml new file mode 100644 index 000000000..93cd348b6 --- /dev/null +++ b/reservations/dist/chart/templates/prometheus/monitor.yaml @@ -0,0 +1,40 @@ +# To integrate with Prometheus. +{{- if .Values.prometheus.enable }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + control-plane: controller-manager + name: reservations-controller-manager-metrics-monitor + namespace: {{ .Release.Namespace }} +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + {{- if .Values.certmanager.enable }} + serverName: reservations-controller-manager-metrics-service.{{ .Release.Namespace }}.svc + # Apply secure TLS configuration with cert-manager + insecureSkipVerify: false + ca: + secret: + name: metrics-server-cert + key: ca.crt + cert: + secret: + name: metrics-server-cert + key: tls.crt + keySecret: + name: metrics-server-cert + key: tls.key + {{- else }} + # Development/Test mode (insecure configuration) + insecureSkipVerify: true + {{- end }} + selector: + matchLabels: + control-plane: controller-manager +{{- end }} diff --git a/reservations/dist/chart/templates/rbac/computereservation_admin_role.yaml b/reservations/dist/chart/templates/rbac/computereservation_admin_role.yaml new file mode 100644 index 000000000..7bceb97e9 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/computereservation_admin_role.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.enable }} +# This rule is not used by the project reservations itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over reservations.cortex. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: computereservation-admin-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - '*' +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/computereservation_editor_role.yaml b/reservations/dist/chart/templates/rbac/computereservation_editor_role.yaml new file mode 100644 index 000000000..0869cd144 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/computereservation_editor_role.yaml @@ -0,0 +1,34 @@ +{{- if .Values.rbac.enable }} +# This rule is not used by the project reservations itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the reservations.cortex. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + name: computereservation-editor-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/computereservation_viewer_role.yaml b/reservations/dist/chart/templates/rbac/computereservation_viewer_role.yaml new file mode 100644 index 000000000..ff872d075 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/computereservation_viewer_role.yaml @@ -0,0 +1,30 @@ +{{- if .Values.rbac.enable }} +# This rule is not used by the project reservations itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to reservations.cortex resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: computereservation-viewer-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - get + - list + - watch +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/leader_election_role.yaml b/reservations/dist/chart/templates/rbac/leader_election_role.yaml new file mode 100644 index 000000000..07c80a966 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/leader_election_role.yaml @@ -0,0 +1,42 @@ +{{- if .Values.rbac.enable }} +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace }} + name: reservations-leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/leader_election_role_binding.yaml b/reservations/dist/chart/templates/rbac/leader_election_role_binding.yaml new file mode 100644 index 000000000..8cc35084f --- /dev/null +++ b/reservations/dist/chart/templates/rbac/leader_election_role_binding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + namespace: {{ .Release.Namespace }} + name: reservations-leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: reservations-leader-election-role +subjects: +- kind: ServiceAccount + name: {{ .Values.controllerManager.serviceAccountName }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/metrics_auth_role.yaml b/reservations/dist/chart/templates/rbac/metrics_auth_role.yaml new file mode 100644 index 000000000..9b0c32d2c --- /dev/null +++ b/reservations/dist/chart/templates/rbac/metrics_auth_role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.enable .Values.metrics.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: reservations-metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/metrics_auth_role_binding.yaml b/reservations/dist/chart/templates/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 000000000..be7d23bd9 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.enable .Values.metrics.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: reservations-metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: reservations-metrics-auth-role +subjects: +- kind: ServiceAccount + name: {{ .Values.controllerManager.serviceAccountName }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/metrics_reader_role.yaml b/reservations/dist/chart/templates/rbac/metrics_reader_role.yaml new file mode 100644 index 000000000..b4f3af494 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/metrics_reader_role.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.rbac.enable .Values.metrics.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: reservations-metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/role.yaml b/reservations/dist/chart/templates/rbac/role.yaml new file mode 100644 index 000000000..057407369 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/role.yaml @@ -0,0 +1,36 @@ +{{- if .Values.rbac.enable }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "chart.labels" . 
| nindent 4 }} + name: reservations-manager-role +rules: +- apiGroups: + - reservations.cortex + resources: + - computereservations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - reservations.cortex + resources: + - computereservations/finalizers + verbs: + - update +- apiGroups: + - reservations.cortex + resources: + - computereservations/status + verbs: + - get + - patch + - update +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/role_binding.yaml b/reservations/dist/chart/templates/rbac/role_binding.yaml new file mode 100644 index 000000000..e5fd69ae7 --- /dev/null +++ b/reservations/dist/chart/templates/rbac/role_binding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.enable }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + name: reservations-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: reservations-manager-role +subjects: +- kind: ServiceAccount + name: {{ .Values.controllerManager.serviceAccountName }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/reservations/dist/chart/templates/rbac/service_account.yaml b/reservations/dist/chart/templates/rbac/service_account.yaml new file mode 100644 index 000000000..93e0a323e --- /dev/null +++ b/reservations/dist/chart/templates/rbac/service_account.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbac.enable }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- if and .Values.controllerManager.serviceAccount .Values.controllerManager.serviceAccount.annotations }} + annotations: + {{- range $key, $value := .Values.controllerManager.serviceAccount.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + name: {{ .Values.controllerManager.serviceAccountName }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/reservations/dist/chart/values.yaml b/reservations/dist/chart/values.yaml new file mode 100644 index 000000000..f6affc369 --- /dev/null +++ b/reservations/dist/chart/values.yaml @@ -0,0 +1,129 @@ +# This file is safe from kubebuilder edit --plugins=helm/v1-alpha +# If you want to re-generate, add the --force flag. 
+ +owner-info: + enabled: true + helm-chart-url: "https://github.com/cobaltcore-dev/cortex/reservations/dist/chart" + maintainers: + - "p.matthes@sap.com" + - "markus.wieland@sap.com" + - "arno.uhlig@sap.com" + support-group: "workload-management" + service: "cortex-reservations" + +# [MANAGER]: Manager Deployment Configurations +controllerManager: + replicas: 1 + container: + image: + repository: ghcr.io/cobaltcore-dev/cortex-reservations-operator + args: + - "--leader-elect" + - "--metrics-bind-address=:8443" + - "--health-probe-bind-address=:8081" + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 20 + httpGet: + path: /healthz + port: 8081 + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + httpGet: + path: /readyz + port: 8081 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 10 + serviceAccountName: reservations-controller-manager + +# [RBAC]: To enable RBAC (Permissions) configurations +rbac: + enable: true + +# [CRDs]: To enable the CRDs +crd: + # This option determines whether the CRDs are included + # in the installation process. + enable: true + + # Enabling this option adds the "helm.sh/resource-policy": keep + # annotation to the CRD, ensuring it remains installed even when + # the Helm release is uninstalled. + # NOTE: Removing the CRDs will also remove all cert-manager CR(s) + # (Certificates, Issuers, ...) due to garbage collection. + keep: true + +# [METRICS]: Set to true to generate manifests for exporting metrics. +# To disable metrics export set false, and ensure that the +# ControllerManager argument "--metrics-bind-address=:8443" is removed. +metrics: + enable: true + +# [PROMETHEUS]: To enable a ServiceMonitor to export metrics to Prometheus set true +prometheus: + enable: true + +# [CERT-MANAGER]: To enable cert-manager injection to webhooks set true +certmanager: + enable: false + +# [NETWORK POLICIES]: To enable NetworkPolicies set true +networkPolicy: + enable: false + +# SSO certificate to use. +sharedSSOCert: &sharedSSOCert + # Certificate "public key". (Optional, remove this key if not needed) + cert: | + -----BEGIN CERTIFICATE----- + Your certificate here + -----END CERTIFICATE----- + # Certificate private key. (Optional, remove this key if not needed) + certKey: | + -----BEGIN PRIVATE KEY----- + Your private key here + -----END PRIVATE KEY + # Whether the certificate is self-signed. + # If true, the certificate is not verified. + selfSigned: false + +reservations: + # Default configuration provided through configmap to the operator. + conf: + # Which hypervisor types should be handled by the operator. + hypervisors: + - "QEMU" + - "CH" + # Not supported: + # - "VMware vCenter Server" + # - "ironic" + endpoints: + # The URL of the Nova external scheduler service. + novaExternalScheduler: "http://cortex-nova-scheduler:8080/scheduler/nova/external" + # Config provided here will override the config provided above. + secrets: + # Override the endpoints and credentials to your OpenStack. 
+ keystone: + url: https://path-to-keystone/v3 + sso: *sharedSSOCert + username: openstack-user-with-all-project-read-access + password: openstack-user-password + projectName: openstack-project-of-user + userDomainName: openstack-domain-of-user + projectDomainName: openstack-domain-of-project-scoped-to diff --git a/reservations/go.mod b/reservations/go.mod new file mode 100644 index 000000000..d1c602d6f --- /dev/null +++ b/reservations/go.mod @@ -0,0 +1,115 @@ +module github.com/cobaltcore-dev/cortex/reservations + +go 1.24.0 + +replace ( + github.com/cobaltcore-dev/cortex => ../ + github.com/cobaltcore-dev/cortex/reservations/api => ./api +) + +require ( + github.com/cobaltcore-dev/cortex v0.0.0-00010101000000-000000000000 + github.com/cobaltcore-dev/cortex/reservations/api v0.0.0-00010101000000-000000000000 + github.com/gophercloud/gophercloud/v2 v2.8.0 + k8s.io/apimachinery v0.33.4 + k8s.io/client-go v0.33.0 + sigs.k8s.io/controller-runtime v0.21.0 +) + +require ( + filippo.io/edwards25519 v1.1.0 // indirect + github.com/dlmiddlecote/sqlstats v1.0.2 // indirect + github.com/eclipse/paho.mqtt.golang v1.5.0 // indirect + github.com/go-gorp/gorp v2.2.0+incompatible // indirect + github.com/golang-migrate/migrate/v4 v4.18.3 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/sapcc/go-api-declarations v1.17.3 // indirect + go.uber.org/atomic v1.11.0 // indirect +) + +require ( + cel.dev/expr v0.19.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/sapcc/go-bits v0.0.0-20250820140623-085431e07de8 + 
github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/protobuf v1.36.7 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.33.0 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/apiserver v0.33.0 // indirect + k8s.io/component-base v0.33.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/reservations/go.sum b/reservations/go.sum new file mode 100644 index 000000000..7e66e696c --- /dev/null +++ b/reservations/go.sum @@ -0,0 +1,388 @@ +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dhui/dktest v0.4.5 h1:uUfYBIVREmj/Rw6MvgmqNAYzTiKOHJak+enB5Di73MM= +github.com/dhui/dktest v0.4.5/go.mod h1:tmcyeHDKagvlDrz7gDKq4UAJOLIfVZYkfD5OnHDwcCo= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlmiddlecote/sqlstats v1.0.2 h1:gSU11YN23D/iY50A2zVYwgXgy072khatTsIW6UPjUtI= +github.com/dlmiddlecote/sqlstats v1.0.2/go.mod h1:0CWaIh/Th+z2aI6Q9Jpfg/o21zmGxWhbByHgQSCUQvY= +github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= +github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/eclipse/paho.mqtt.golang v1.5.0 h1:EH+bUVJNgttidWFkLLVKaQPGmkTUfQQqjOsyvMGvD6o= +github.com/eclipse/paho.mqtt.golang v1.5.0/go.mod 
h1:du/2qNQVqJf/Sqs4MEL77kR8QTqANF7XU7Fk0aOTAgk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-gorp/gorp v2.2.0+incompatible h1:xAUh4QgEeqPPhK3vxZN+bzrim1z5Av6q837gtjUlshc= +github.com/go-gorp/gorp v2.2.0+incompatible/go.mod h1:7IfkAQnO7jfT/9IQ3R9wL1dFhukN6aQxzKTHnkxzA/E= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4B/swMiAmDLs= +github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo= +github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI= +github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sapcc/go-api-declarations v1.17.3 h1:ILRfsFD9ChSeekyzlDInLMqjC830gBcyK6ULlFdP45I= +github.com/sapcc/go-api-declarations v1.17.3/go.mod h1:MWmLjmvjftgyAugNUfIhsDsHIzXH1pn32cWLZpiluKg= +github.com/sapcc/go-bits v0.0.0-20250820140623-085431e07de8 h1:3Y95JYv8ou5LGiu0+HNdyZ4iRSxZNsNvabqQdHFJesI= +github.com/sapcc/go-bits v0.0.0-20250820140623-085431e07de8/go.mod h1:ey5mUTDJagbUtjNebUQKkR/3e1UAJ/NTUz1/TDdgk1Q= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.35.0 
h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/reservations/hack/boilerplate.go.txt b/reservations/hack/boilerplate.go.txt new file mode 100644 index 000000000..0fb88f919 --- /dev/null +++ b/reservations/hack/boilerplate.go.txt @@ -0,0 +1,2 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 \ No newline at end of file diff 
--git a/reservations/internal/controller/conf.go b/reservations/internal/controller/conf.go new file mode 100644 index 000000000..8b30a5535 --- /dev/null +++ b/reservations/internal/controller/conf.go @@ -0,0 +1,22 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import "github.com/cobaltcore-dev/cortex/internal/conf" + +// Endpoints for the reservations operator. +type EndpointsConfig struct { + // The nova external scheduler endpoint. + NovaExternalScheduler string `json:"novaExternalScheduler"` +} + +// Configuration for the reservations operator. +type Config struct { + // The endpoint where to find the nova external scheduler endpoint. + Endpoints EndpointsConfig `json:"endpoints"` + // Hypervisor types that should be managed. + Hypervisors []string `json:"hypervisors"` + // Keystone config. + Keystone conf.KeystoneConfig `json:"keystone"` +} diff --git a/reservations/internal/controller/conf_test.go b/reservations/internal/controller/conf_test.go new file mode 100644 index 000000000..b78cc55d5 --- /dev/null +++ b/reservations/internal/controller/conf_test.go @@ -0,0 +1,128 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "testing" + + "github.com/cobaltcore-dev/cortex/internal/conf" +) + +func TestConfig_Structure(t *testing.T) { + // Test that Config struct can be instantiated + config := Config{ + Endpoints: EndpointsConfig{ + NovaExternalScheduler: "http://localhost:8080", + }, + Hypervisors: []string{"kvm", "vmware"}, + Keystone: conf.KeystoneConfig{ + URL: "http://keystone:5000/v3", + OSUsername: "test-user", + OSPassword: "test-password", + OSProjectName: "test-project", + OSUserDomainName: "default", + OSProjectDomainName: "default", + }, + } + + // Verify the config fields are set correctly + if config.Endpoints.NovaExternalScheduler != "http://localhost:8080" { + t.Errorf("Expected NovaExternalScheduler to be 'http://localhost:8080', got %v", config.Endpoints.NovaExternalScheduler) + } + + if len(config.Hypervisors) != 2 { + t.Errorf("Expected 2 hypervisors, got %d", len(config.Hypervisors)) + } + + if config.Hypervisors[0] != "kvm" { + t.Errorf("Expected first hypervisor to be 'kvm', got %v", config.Hypervisors[0]) + } + + if config.Hypervisors[1] != "vmware" { + t.Errorf("Expected second hypervisor to be 'vmware', got %v", config.Hypervisors[1]) + } + + if config.Keystone.URL != "http://keystone:5000/v3" { + t.Errorf("Expected Keystone URL to be 'http://keystone:5000/v3', got %v", config.Keystone.URL) + } +} + +func TestEndpointsConfig_Structure(t *testing.T) { + // Test that EndpointsConfig struct can be instantiated + endpoints := EndpointsConfig{ + NovaExternalScheduler: "http://nova-scheduler:8080/v1/schedule", + } + + if endpoints.NovaExternalScheduler != "http://nova-scheduler:8080/v1/schedule" { + t.Errorf("Expected NovaExternalScheduler to be 'http://nova-scheduler:8080/v1/schedule', got %v", endpoints.NovaExternalScheduler) + } +} + +func TestConfig_EmptyValues(t *testing.T) { + // Test that Config struct works with empty values + config := Config{} + + if config.Endpoints.NovaExternalScheduler != "" { + t.Errorf("Expected empty NovaExternalScheduler, got %v", config.Endpoints.NovaExternalScheduler) + } + + if len(config.Hypervisors) != 0 { + t.Errorf("Expected 0 hypervisors, got %d", len(config.Hypervisors)) + } + + if config.Keystone.URL != "" { + t.Errorf("Expected empty Keystone URL, got %v", config.Keystone.URL) + } +} + +func TestConfig_HypervisorsList(t 
*testing.T) {
+	// Test various hypervisor configurations
+	tests := []struct {
+		name         string
+		hypervisors  []string
+		expectedLen  int
+		expectedVals []string
+	}{
+		{
+			name:         "single hypervisor",
+			hypervisors:  []string{"kvm"},
+			expectedLen:  1,
+			expectedVals: []string{"kvm"},
+		},
+		{
+			name:         "multiple hypervisors",
+			hypervisors:  []string{"kvm", "vmware", "xen"},
+			expectedLen:  3,
+			expectedVals: []string{"kvm", "vmware", "xen"},
+		},
+		{
+			name:         "empty hypervisors",
+			hypervisors:  []string{},
+			expectedLen:  0,
+			expectedVals: []string{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			config := Config{
+				Hypervisors: tt.hypervisors,
+			}
+
+			if len(config.Hypervisors) != tt.expectedLen {
+				t.Errorf("Expected %d hypervisors, got %d", tt.expectedLen, len(config.Hypervisors))
+			}
+
+			for i, expected := range tt.expectedVals {
+				if i >= len(config.Hypervisors) {
+					t.Errorf("Expected hypervisor at index %d to be %v, but index is out of range", i, expected)
+					continue
+				}
+				if config.Hypervisors[i] != expected {
+					t.Errorf("Expected hypervisor at index %d to be %v, got %v", i, expected, config.Hypervisors[i])
+				}
+			}
+		})
+	}
+}
diff --git a/reservations/internal/controller/controller.go b/reservations/internal/controller/controller.go
new file mode 100644
index 000000000..bc9b7dfe1
--- /dev/null
+++ b/reservations/internal/controller/controller.go
@@ -0,0 +1,193 @@
+// Copyright 2025 SAP SE
+// SPDX-License-Identifier: Apache-2.0
+
+package controller
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"slices"
+	"sort"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+
+	"github.com/cobaltcore-dev/cortex/internal/scheduler/nova/api"
+	"github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1"
+	reservationsv1alpha1 "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1"
+	"github.com/sapcc/go-bits/jobloop"
+)
+
+// ComputeReservationReconciler reconciles a ComputeReservation object
+type ComputeReservationReconciler struct {
+	// Client for the kubernetes API.
+	client.Client
+	// Kubernetes scheme to use for the reservations.
+	Scheme *runtime.Scheme
+	// Configuration for the controller.
+	Conf Config
+}
+
+// +kubebuilder:rbac:groups=reservations.cortex,resources=computereservations,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=reservations.cortex,resources=computereservations/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=reservations.cortex,resources=computereservations/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+func (r *ComputeReservationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := logf.FromContext(ctx)
+	// Fetch the reservation object.
+	var res v1alpha1.ComputeReservation
+	if err := r.Get(ctx, req.NamespacedName, &res); err != nil {
+		// Can happen when the resource was just deleted; ignore not-found errors.
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+	// If the reservation is already active, skip it.
+	if res.Status.Phase == v1alpha1.ComputeReservationStatusPhaseActive {
+		log.Info("reservation is already active, skipping", "reservation", req.Name)
+		return ctrl.Result{}, nil // Don't need to requeue.
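+		// Everything below only runs for reservations that are not active yet:
+		// instance reservations are placed by asking the Nova external scheduler
+		// for a host, while bare-resource reservations are currently rejected as
+		// unsupported (see reconcileInstanceReservation and
+		// reconcileBareResourceReservation below).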
+ } + switch res.Spec.Kind { + case v1alpha1.ComputeReservationSpecKindInstance: + return r.reconcileInstanceReservation(ctx, req, res) + case v1alpha1.ComputeReservationSpecKindBareResource: + return r.reconcileBareResourceReservation(ctx, req, res) + default: + log.Info("reservation kind is not supported, skipping", "reservation", req.Name, "kind", res.Spec.Kind) + return ctrl.Result{}, nil // Don't need to requeue. + } +} + +// Reconcile an instance reservation. +func (r *ComputeReservationReconciler) reconcileInstanceReservation( + ctx context.Context, + req ctrl.Request, + res v1alpha1.ComputeReservation, +) (ctrl.Result, error) { + log := logf.FromContext(ctx) + spec := res.Spec.Instance + hvType, ok := spec.ExtraSpecs["capabilities:hypervisor_type"] + if !ok || !slices.Contains(r.Conf.Hypervisors, hvType) { + log.Info("hypervisor type is not supported", "reservation", req.Name, "type", hvType) + if hvType == "" { + res.Status.Error = "hypervisor type is not specified" + } else { + hvs := r.Conf.Hypervisors + sort.Strings(hvs) + supported := strings.Join(hvs, ", ") + res.Status.Error = fmt.Sprintf("unsupported hv '%s', supported: %s", hvType, supported) + } + res.Status.Phase = v1alpha1.ComputeReservationStatusPhaseFailed + if err := r.Client.Status().Update(ctx, &res); err != nil { + log.Error(err, "failed to update reservation status") + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, err + } + return ctrl.Result{}, nil // No need to requeue, the reservation is now failed. + } + + // Convert resource.Quantity to integers for the API + memoryValue := spec.Memory.ScaledValue(resource.Mega) + if memoryValue < 0 { + return ctrl.Result{}, fmt.Errorf("invalid memory value: %d", memoryValue) + } + memoryMB := uint64(memoryValue) + + vCPUsValue := spec.VCPUs.Value() + if vCPUsValue < 0 { + return ctrl.Result{}, fmt.Errorf("invalid vCPUs value: %d", vCPUsValue) + } + vCPUs := uint64(vCPUsValue) + + diskValue := spec.Disk.ScaledValue(resource.Giga) + if diskValue < 0 { + return ctrl.Result{}, fmt.Errorf("invalid disk value: %d", diskValue) + } + diskGB := uint64(diskValue) + + externalSchedulerRequest := api.ExternalSchedulerRequest{ + Sandboxed: true, + PreselectAllHosts: true, + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + NumInstances: 1, // One for each reservation. 
+ ProjectID: res.Spec.ProjectID, + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + Name: spec.Flavor, + ExtraSpecs: spec.ExtraSpecs, + MemoryMB: memoryMB, + VCPUs: vCPUs, + RootGB: diskGB, + }, + }, + }, + }, + } + httpClient := http.Client{} + url := r.Conf.Endpoints.NovaExternalScheduler + reqBody, err := json.Marshal(externalSchedulerRequest) + if err != nil { + log.Error(err, "failed to marshal external scheduler request") + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, err + } + response, err := httpClient.Post(url, "application/json", strings.NewReader(string(reqBody))) + if err != nil { + log.Error(err, "failed to send external scheduler request") + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, err + } + defer response.Body.Close() + var externalSchedulerResponse api.ExternalSchedulerResponse + if err := json.NewDecoder(response.Body).Decode(&externalSchedulerResponse); err != nil { + log.Error(err, "failed to decode external scheduler response") + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, err + } + if len(externalSchedulerResponse.Hosts) == 0 { + log.Info("no hosts found for reservation", "reservation", req.Name) + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, errors.New("no hosts found for reservation") + } + // Update the reservation with the found host (idx 0) + host := externalSchedulerResponse.Hosts[0] + log.Info("found host for reservation", "reservation", req.Name, "host", host) + res.Status.Phase = v1alpha1.ComputeReservationStatusPhaseActive + res.Status.Host = host + res.Status.Error = "" // Clear any previous error. + if err := r.Status().Update(ctx, &res); err != nil { + log.Error(err, "failed to update reservation status") + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, err + } + return ctrl.Result{}, nil // No need to requeue, the reservation is now active. +} + +// Reconcile a bare resource reservation. +func (r *ComputeReservationReconciler) reconcileBareResourceReservation( + ctx context.Context, + req ctrl.Request, + res v1alpha1.ComputeReservation, +) (ctrl.Result, error) { + + log := logf.FromContext(ctx) + log.Info("bare resource reservations are not supported", "reservation", req.Name) + res.Status.Phase = v1alpha1.ComputeReservationStatusPhaseFailed + res.Status.Error = "bare resource reservations are not supported" + if err := r.Client.Status().Update(ctx, &res); err != nil { + log.Error(err, "failed to update reservation status") + return ctrl.Result{RequeueAfter: jobloop.DefaultJitter(time.Minute)}, err + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ComputeReservationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&reservationsv1alpha1.ComputeReservation{}). + Named("computereservation"). 
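+		// For(...) registers a watch on ComputeReservation objects, so creates and
+		// updates of reservations are enqueued and handled by Reconcile above.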
+ Complete(r) +} diff --git a/reservations/internal/controller/controller_test.go b/reservations/internal/controller/controller_test.go new file mode 100644 index 000000000..bd0f1024b --- /dev/null +++ b/reservations/internal/controller/controller_test.go @@ -0,0 +1,456 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/cobaltcore-dev/cortex/internal/scheduler/nova/api" + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" +) + +func TestComputeReservationReconciler_Reconcile(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + tests := []struct { + name string + reservation *v1alpha1.ComputeReservation + expectedPhase v1alpha1.ComputeReservationStatusPhase + expectedError string + shouldRequeue bool + }{ + { + name: "skip already active reservation", + reservation: &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseActive, + }, + }, + expectedPhase: v1alpha1.ComputeReservationStatusPhaseActive, + shouldRequeue: false, + }, + { + name: "skip unsupported reservation kind", + reservation: &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: "unsupported", + ProjectID: "test-project", + }, + }, + shouldRequeue: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tt.reservation). 
+ Build() + + reconciler := &ComputeReservationReconciler{ + Client: client, + Scheme: scheme, + Conf: Config{ + Hypervisors: []string{"kvm", "vmware"}, + }, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tt.reservation.Name, + }, + } + + result, err := reconciler.Reconcile(context.Background(), req) + + if err != nil { + t.Errorf("Reconcile() error = %v", err) + return + } + + if tt.shouldRequeue && result.RequeueAfter == 0 { + t.Errorf("Expected requeue but got none") + } + + if !tt.shouldRequeue && result.RequeueAfter > 0 { + t.Errorf("Expected no requeue but got %v", result.RequeueAfter) + } + + // Verify the reservation status if expected + if tt.expectedPhase != "" { + var updated v1alpha1.ComputeReservation + err := client.Get(context.Background(), req.NamespacedName, &updated) + if err != nil { + t.Errorf("Failed to get updated reservation: %v", err) + return + } + + if updated.Status.Phase != tt.expectedPhase { + t.Errorf("Expected phase %v, got %v", tt.expectedPhase, updated.Status.Phase) + } + + if tt.expectedError != "" && updated.Status.Error != tt.expectedError { + t.Errorf("Expected error %v, got %v", tt.expectedError, updated.Status.Error) + } + } + }) + } +} + +func TestComputeReservationReconciler_reconcileInstanceReservation(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + tests := []struct { + name string + reservation *v1alpha1.ComputeReservation + config Config + mockResponse *api.ExternalSchedulerResponse + expectedPhase v1alpha1.ComputeReservationStatusPhase + expectedError string + shouldRequeue bool + }{ + { + name: "unsupported hypervisor type", + reservation: &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "unsupported", + }, + Memory: resource.MustParse("1Gi"), + VCPUs: resource.MustParse("2"), + Disk: resource.MustParse("10Gi"), + }, + }, + }, + config: Config{ + Hypervisors: []string{"kvm", "vmware"}, + }, + expectedPhase: v1alpha1.ComputeReservationStatusPhaseFailed, + expectedError: "unsupported hv 'unsupported', supported: kvm, vmware", + shouldRequeue: false, + }, + { + name: "missing hypervisor type", + reservation: &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + ExtraSpecs: map[string]string{}, + Memory: resource.MustParse("1Gi"), + VCPUs: resource.MustParse("2"), + Disk: resource.MustParse("10Gi"), + }, + }, + }, + config: Config{ + Hypervisors: []string{"kvm", "vmware"}, + }, + expectedPhase: v1alpha1.ComputeReservationStatusPhaseFailed, + expectedError: "hypervisor type is not specified", + shouldRequeue: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tt.reservation). + WithStatusSubresource(&v1alpha1.ComputeReservation{}). 
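+				// WithStatusSubresource is needed so that the fake client accepts the
+				// Status().Update() calls issued by the reconciler under test.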
+ Build() + + // Create a mock server for the external scheduler + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if tt.mockResponse != nil { + json.NewEncoder(w).Encode(tt.mockResponse) + } else { + w.WriteHeader(http.StatusInternalServerError) + } + })) + defer server.Close() + + tt.config.Endpoints.NovaExternalScheduler = server.URL + + reconciler := &ComputeReservationReconciler{ + Client: client, + Scheme: scheme, + Conf: tt.config, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: tt.reservation.Name, + }, + } + + result, err := reconciler.reconcileInstanceReservation(context.Background(), req, *tt.reservation) + + if err != nil && !tt.shouldRequeue { + t.Errorf("reconcileInstanceReservation() error = %v", err) + return + } + + if tt.shouldRequeue && result.RequeueAfter == 0 { + t.Errorf("Expected requeue but got none") + } + + if !tt.shouldRequeue && result.RequeueAfter > 0 { + t.Errorf("Expected no requeue but got %v", result.RequeueAfter) + } + + // Verify the reservation status + var updated v1alpha1.ComputeReservation + err = client.Get(context.Background(), req.NamespacedName, &updated) + if err != nil { + t.Errorf("Failed to get updated reservation: %v", err) + return + } + + if updated.Status.Phase != tt.expectedPhase { + t.Errorf("Expected phase %v, got %v", tt.expectedPhase, updated.Status.Phase) + } + + if tt.expectedError != "" && updated.Status.Error != tt.expectedError { + t.Errorf("Expected error %v, got %v", tt.expectedError, updated.Status.Error) + } + }) + } +} + +func TestComputeReservationReconciler_reconcileInstanceReservation_Success(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + reservation := &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "kvm", + }, + Memory: resource.MustParse("1Gi"), + VCPUs: resource.MustParse("2"), + Disk: resource.MustParse("10Gi"), + }, + }, + } + + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(reservation). + WithStatusSubresource(&v1alpha1.ComputeReservation{}). 
+ Build() + + // Create a mock server that returns a successful response + mockResponse := &api.ExternalSchedulerResponse{ + Hosts: []string{"test-host-1", "test-host-2"}, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Verify the request body + var req api.ExternalSchedulerRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + t.Errorf("Failed to decode request: %v", err) + w.WriteHeader(http.StatusBadRequest) + return + } + + // Verify request structure + if !req.Sandboxed { + t.Errorf("Expected Sandboxed to be true") + } + if !req.PreselectAllHosts { + t.Errorf("Expected PreselectAllHosts to be true") + } + if req.Spec.Data.NumInstances != 1 { + t.Errorf("Expected NumInstances to be 1, got %d", req.Spec.Data.NumInstances) + } + + json.NewEncoder(w).Encode(mockResponse) + })) + defer server.Close() + + config := Config{ + Hypervisors: []string{"kvm", "vmware"}, + Endpoints: EndpointsConfig{ + NovaExternalScheduler: server.URL, + }, + } + + reconciler := &ComputeReservationReconciler{ + Client: client, + Scheme: scheme, + Conf: config, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: reservation.Name, + }, + } + + result, err := reconciler.reconcileInstanceReservation(context.Background(), req, *reservation) + + if err != nil { + t.Errorf("reconcileInstanceReservation() error = %v", err) + return + } + + if result.RequeueAfter > 0 { + t.Errorf("Expected no requeue but got %v", result.RequeueAfter) + } + + // Verify the reservation status + var updated v1alpha1.ComputeReservation + err = client.Get(context.Background(), req.NamespacedName, &updated) + if err != nil { + t.Errorf("Failed to get updated reservation: %v", err) + return + } + + if updated.Status.Phase != v1alpha1.ComputeReservationStatusPhaseActive { + t.Errorf("Expected phase %v, got %v", v1alpha1.ComputeReservationStatusPhaseActive, updated.Status.Phase) + } + + if updated.Status.Host != "test-host-1" { + t.Errorf("Expected host %v, got %v", "test-host-1", updated.Status.Host) + } + + if updated.Status.Error != "" { + t.Errorf("Expected no error, got %v", updated.Status.Error) + } +} + +func TestComputeReservationReconciler_reconcileBareResourceReservation(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + reservation := &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindBareResource, + ProjectID: "test-project", + BareResource: v1alpha1.ComputeReservationSpecBareResource{ + CPU: resource.MustParse("2"), + Memory: resource.MustParse("1Gi"), + Disk: resource.MustParse("10Gi"), + }, + }, + } + + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(reservation). + WithStatusSubresource(&v1alpha1.ComputeReservation{}). 
+ Build() + + reconciler := &ComputeReservationReconciler{ + Client: client, + Scheme: scheme, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: reservation.Name, + }, + } + + result, err := reconciler.reconcileBareResourceReservation(context.Background(), req, *reservation) + + if err != nil { + t.Errorf("reconcileBareResourceReservation() error = %v", err) + return + } + + if result.RequeueAfter > 0 { + t.Errorf("Expected no requeue but got %v", result.RequeueAfter) + } + + // Verify the reservation status + var updated v1alpha1.ComputeReservation + err = client.Get(context.Background(), req.NamespacedName, &updated) + if err != nil { + t.Errorf("Failed to get updated reservation: %v", err) + return + } + + if updated.Status.Phase != v1alpha1.ComputeReservationStatusPhaseFailed { + t.Errorf("Expected phase %v, got %v", v1alpha1.ComputeReservationStatusPhaseFailed, updated.Status.Phase) + } + + expectedError := "bare resource reservations are not supported" + if updated.Status.Error != expectedError { + t.Errorf("Expected error %v, got %v", expectedError, updated.Status.Error) + } +} + +func TestComputeReservationReconciler_SetupWithManager(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + reconciler := &ComputeReservationReconciler{ + Scheme: scheme, + } + + // This test just verifies that SetupWithManager method exists + // We can't easily test the actual setup without a real manager + // but we can verify the method signature is correct by calling it with nil + // (it will return an error, but that's expected) + err := reconciler.SetupWithManager(nil) + if err == nil { + t.Error("Expected error when calling SetupWithManager with nil manager") + } +} diff --git a/reservations/internal/controller/monitor.go b/reservations/internal/controller/monitor.go new file mode 100644 index 000000000..115a15f33 --- /dev/null +++ b/reservations/internal/controller/monitor.go @@ -0,0 +1,106 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + "strings" + + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" + "github.com/prometheus/client_golang/prometheus" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + monitorLog = ctrl.Log.WithName("monitor") +) + +// Monitor for reservations metrics. +type Monitor struct { + // Client for the kubernetes API. + client.Client + + // Metrics + numberOfReservations *prometheus.GaugeVec + reservedResources *prometheus.GaugeVec +} + +// Initialize the metrics and bind them to the registry. +func (m *Monitor) Init() { + m.numberOfReservations = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_reservations_number", + Help: "Number of reservations.", + }, []string{"status_phase", "status_error", "spec_kind"}) + m.reservedResources = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_reservations_resources", + Help: "Resources reserved by reservations.", + }, []string{"status_phase", "status_error", "spec_kind", "host", "resource"}) +} + +// Describe the metrics for Prometheus. +func (m *Monitor) Describe(ch chan<- *prometheus.Desc) { + m.numberOfReservations.Describe(ch) + m.reservedResources.Describe(ch) +} + +// Collect the metrics on-demand and send them to Prometheus. +func (m *Monitor) Collect(ch chan<- prometheus.Metric) { + // Fetch all reservations from kubernetes. 
+ var reservations v1alpha1.ComputeReservationList + if err := m.List( + context.Background(), + &reservations, + ); err != nil { + monitorLog.Error(err, "failed to list reservations") + return + } + + countByLabels := map[string]uint64{} + for _, reservation := range reservations.Items { + key := string(reservation.Status.Phase) + + "," + strings.ReplaceAll(reservation.Status.Error, ",", ";") + + "," + string(reservation.Spec.Kind) + countByLabels[key]++ + } + for key, count := range countByLabels { + labelValues := strings.Split(key, ",") + m.numberOfReservations.WithLabelValues(labelValues...).Set(float64(count)) + } + m.numberOfReservations.Collect(ch) + + resourcesByLabels := map[string]map[string]uint64{} + for _, reservation := range reservations.Items { + host := "" + key := string(reservation.Status.Phase) + + "," + strings.ReplaceAll(reservation.Status.Error, ",", ";") + + "," + string(reservation.Spec.Kind) + + "," + host + if _, ok := resourcesByLabels[key]; !ok { + resourcesByLabels[key] = map[string]uint64{} + } + switch reservation.Spec.Kind { + case v1alpha1.ComputeReservationSpecKindInstance: + // Instance reservations have resources defined in the instance spec. + resourcesByLabels[key]["vcpus"] += reservation.Spec.Instance.VCPUs. + AsDec().UnscaledBig().Uint64() + resourcesByLabels[key]["memory_mb"] += reservation.Spec.Instance.Memory. + AsDec().UnscaledBig().Uint64() / 1000000 + resourcesByLabels[key]["disk_gb"] += reservation.Spec.Instance.Disk. + AsDec().UnscaledBig().Uint64() / 1000000000 + default: + continue // Skip non-instance reservations. + } + } + for key, resources := range resourcesByLabels { + labelValues := strings.Split(key, ",") + for resource, value := range resources { + m.reservedResources. + WithLabelValues(append(labelValues, resource)...). 
+ Set(float64(value)) + } + } + m.reservedResources.Collect(ch) + monitorLog.Info("collected reservation metrics", "reservations", len(reservations.Items)) +} diff --git a/reservations/internal/controller/monitor_test.go b/reservations/internal/controller/monitor_test.go new file mode 100644 index 000000000..09d86770c --- /dev/null +++ b/reservations/internal/controller/monitor_test.go @@ -0,0 +1,431 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "testing" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +func TestMonitor_Init(t *testing.T) { + monitor := &Monitor{} + monitor.Init() + + if monitor.numberOfReservations == nil { + t.Error("numberOfReservations metric should be initialized") + } + + if monitor.reservedResources == nil { + t.Error("reservedResources metric should be initialized") + } +} + +func TestMonitor_Describe(t *testing.T) { + monitor := &Monitor{} + monitor.Init() + + ch := make(chan *prometheus.Desc, 10) + go func() { + monitor.Describe(ch) + close(ch) + }() + + // Count the number of descriptions + count := 0 + for range ch { + count++ + } + + // Should have descriptions for both metrics + if count != 2 { + t.Errorf("Expected 2 metric descriptions, got %d", count) + } +} + +func TestMonitor_Collect_EmptyList(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + monitor := &Monitor{ + Client: k8sClient, + } + monitor.Init() + + ch := make(chan prometheus.Metric, 10) + go func() { + monitor.Collect(ch) + close(ch) + }() + + // Count the metrics + count := 0 + for range ch { + count++ + } + + // Should have at least the base metrics even with empty list + if count < 0 { + t.Errorf("Expected at least 0 metrics, got %d", count) + } +} + +func TestMonitor_Collect_WithReservations(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + // Create test reservations + reservations := []v1alpha1.ComputeReservation{ + { + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation-1", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project-1", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + Memory: resource.MustParse("1Gi"), + VCPUs: resource.MustParse("2"), + Disk: resource.MustParse("10Gi"), + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseActive, + Host: "test-host-1", + }, + }, + { + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation-2", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project-2", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor-2", + Memory: resource.MustParse("2Gi"), + VCPUs: resource.MustParse("4"), + Disk: resource.MustParse("20Gi"), + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseFailed, + Error: "test error", + }, + }, + { + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation-3", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindBareResource, + ProjectID: "test-project-3", + BareResource: v1alpha1.ComputeReservationSpecBareResource{ + CPU: resource.MustParse("4"), + Memory: resource.MustParse("4Gi"), + Disk: resource.MustParse("40Gi"), + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseActive, + }, + }, + } + + // Convert to client.Object slice + objects := make([]client.Object, len(reservations)) + for i := range reservations { + objects[i] = &reservations[i] + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + + monitor := &Monitor{ + Client: k8sClient, + } + monitor.Init() + + ch := make(chan prometheus.Metric, 100) + go func() { + monitor.Collect(ch) + close(ch) + }() + + // Collect all metrics + metrics := []prometheus.Metric{} + for metric := range ch { + metrics = append(metrics, metric) + } + + if len(metrics) == 0 { + t.Error("Expected some metrics to be collected") + } + + // Verify that we have metrics for different phases and kinds + foundActiveInstance := false + foundFailedInstance := false + foundActiveBare := false + + for _, metric := range metrics { + var m dto.Metric + if err := metric.Write(&m); err != nil { + continue + } + + // Check if this is a numberOfReservations metric + if m.GetGauge() != nil { + labels := make(map[string]string) + for _, label := range m.GetLabel() { + labels[label.GetName()] = label.GetValue() + } + + if labels["status_phase"] == "active" && labels["spec_kind"] == "instance" { + foundActiveInstance = true + } + if labels["status_phase"] == "failed" && labels["spec_kind"] == "instance" { + foundFailedInstance = true + } + if labels["status_phase"] == "active" && labels["spec_kind"] == "bare" { + foundActiveBare = true + } + } + } + + if !foundActiveInstance { + t.Error("Expected to find active instance reservation metric") + } + if !foundFailedInstance { + t.Error("Expected to find failed instance reservation metric") + } + if !foundActiveBare { + t.Error("Expected to find active bare resource reservation metric") + } +} + +func TestMonitor_Collect_ResourceMetrics(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + // Create test reservation with specific resource values + reservation := &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + Memory: resource.MustParse("1000Mi"), // 1000 MiB + VCPUs: resource.MustParse("2"), + Disk: resource.MustParse("10Gi"), // 10 GiB + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseActive, + }, + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(reservation). 
+ Build() + + monitor := &Monitor{ + Client: k8sClient, + } + monitor.Init() + + ch := make(chan prometheus.Metric, 100) + go func() { + monitor.Collect(ch) + close(ch) + }() + + // Collect all metrics + metrics := []prometheus.Metric{} + for metric := range ch { + metrics = append(metrics, metric) + } + + // Look for resource metrics + foundVCPUs := false + foundMemory := false + foundDisk := false + + for _, metric := range metrics { + var m dto.Metric + if err := metric.Write(&m); err != nil { + continue + } + + if m.GetGauge() != nil { + labels := make(map[string]string) + for _, label := range m.GetLabel() { + labels[label.GetName()] = label.GetValue() + } + + if labels["resource"] == "vcpus" { + foundVCPUs = true + if m.GetGauge().GetValue() != 2 { + t.Errorf("Expected vCPUs value 2, got %f", m.GetGauge().GetValue()) + } + } + if labels["resource"] == "memory_mb" { + foundMemory = true + // Memory: 1000Mi = 1000 * 1024 * 1024 bytes = 1048576000 bytes + // Converted to MB: 1048576000 / 1000000 = 1048.576 MB + expectedMemoryMB := float64(1048) // 1000Mi converted to MB + if m.GetGauge().GetValue() != expectedMemoryMB { + t.Errorf("Expected memory_mb value %f, got %f", expectedMemoryMB, m.GetGauge().GetValue()) + } + } + if labels["resource"] == "disk_gb" { + foundDisk = true + // Disk: 10Gi = 10 * 1024 * 1024 * 1024 bytes = 10737418240 bytes + // Converted to GB: 10737418240 / 1000000000 = 10.737418240 GB + expectedDiskGB := float64(10) // 10Gi converted to GB (rounded down due to integer division) + if m.GetGauge().GetValue() != expectedDiskGB { + t.Errorf("Expected disk_gb value %f, got %f", expectedDiskGB, m.GetGauge().GetValue()) + } + } + } + } + + if !foundVCPUs { + t.Error("Expected to find vCPUs resource metric") + } + if !foundMemory { + t.Error("Expected to find memory resource metric") + } + if !foundDisk { + t.Error("Expected to find disk resource metric") + } +} + +func TestMonitor_Collect_ErrorHandling(t *testing.T) { + // Test with a client that will fail to list reservations + scheme := runtime.NewScheme() + // Don't add the scheme, which should cause the List operation to fail + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + monitor := &Monitor{ + Client: k8sClient, + } + monitor.Init() + + ch := make(chan prometheus.Metric, 10) + go func() { + monitor.Collect(ch) + close(ch) + }() + + // Should not panic and should handle the error gracefully + count := 0 + for range ch { + count++ + } + + // Should not collect any metrics due to the error + if count != 0 { + t.Errorf("Expected 0 metrics due to error, got %d", count) + } +} + +func TestMonitor_Collect_LabelSanitization(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + // Create test reservation with error containing commas + reservation := &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "test-reservation", + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: "test-project", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "test-flavor", + Memory: resource.MustParse("1Gi"), + VCPUs: resource.MustParse("2"), + Disk: resource.MustParse("10Gi"), + }, + }, + Status: v1alpha1.ComputeReservationStatus{ + Phase: v1alpha1.ComputeReservationStatusPhaseFailed, + Error: "error with, commas, in it", + }, + } + + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(reservation). 
+ Build() + + monitor := &Monitor{ + Client: client, + } + monitor.Init() + + ch := make(chan prometheus.Metric, 100) + go func() { + monitor.Collect(ch) + close(ch) + }() + + // Collect all metrics + metrics := []prometheus.Metric{} + for metric := range ch { + metrics = append(metrics, metric) + } + + // Verify that commas in error messages are replaced with semicolons + foundSanitizedError := false + for _, metric := range metrics { + var m dto.Metric + if err := metric.Write(&m); err != nil { + continue + } + + if m.GetGauge() != nil { + for _, label := range m.GetLabel() { + if label.GetName() == "status_error" && label.GetValue() == "error with; commas; in it" { + foundSanitizedError = true + break + } + } + } + } + + if !foundSanitizedError { + t.Error("Expected to find sanitized error label with semicolons instead of commas") + } +} diff --git a/reservations/internal/controller/syncer.go b/reservations/internal/controller/syncer.go new file mode 100644 index 000000000..9ddf6544c --- /dev/null +++ b/reservations/internal/controller/syncer.go @@ -0,0 +1,484 @@ +package controller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" + + gosync "sync" + + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/cobaltcore-dev/cortex/internal/conf" + "github.com/cobaltcore-dev/cortex/internal/keystone" + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/v2/openstack/identity/v3/projects" + "github.com/sapcc/go-bits/jobloop" + "github.com/sapcc/go-bits/must" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + syncLog = ctrl.Log.WithName("sync") +) + +// Commitment model from the limes API. +// See: https://github.com/sapcc/limes/blob/5ea068b/docs/users/api-spec-resources.md?plain=1#L493 +// See: https://github.com/sapcc/go-api-declarations/blob/94ee3e5/limes/resources/commitment.go#L19 +type Commitment struct { + // A unique numerical identifier for this commitment. This API uses this + // numerical ID to refer to the commitment in other API calls. + ID int `json:"id"` + // A unique string identifier for this commitment. The next major version of + // this API will use this UUID instead of the numerical ID to refer to + // commitments in API calls. + UUID string `json:"uuid"` + // The resource for which usage is committed. + ServiceType string `json:"service_type"` + ResourceName string `json:"resource_name"` + // The availability zone in which usage is committed. + AvailabilityZone string `json:"availability_zone"` + // The amount of usage that was committed to. + Amount uint64 `json:"amount"` + // For measured resources, the unit for this resource. The value from the + // amount field is measured in this unit. + Unit string `json:"unit"` + // The requested duration of this commitment, expressed as a comma-separated + // sequence of positive integer multiples of time units like "1 year, + // 3 months". Acceptable time units include "second", "minute", "hour", + // "day", "month" and "year". + Duration string `json:"duration"` + // UNIX timestamp when this commitment was created. + CreatedAt uint64 `json:"created_at"` + // UNIX timestamp when this commitment should be confirmed. 
Only shown if + // this was given when creating the commitment, to delay confirmation into + // the future. + ConfirmBy *uint64 `json:"confirm_by,omitempty"` + // UNIX timestamp when this commitment was confirmed. Only shown after + // confirmation. + ConfirmedAt *uint64 `json:"confirmed_at,omitempty"` + // UNIX timestamp when this commitment is set to expire. Note that the + // duration counts from confirmBy (or from createdAt for immediately- + // confirmed commitments) and is calculated at creation time, so this is + // also shown on unconfirmed commitments. + ExpiresAt uint64 `json:"expires_at"` + // Whether the commitment is marked for transfer to a different project. + // Transferable commitments do not count towards quota calculation in their + // project, but still block capacity and still count towards billing. Not + // shown if false. + Transferable bool `json:"transferable"` + // The current status of this commitment. If provided, one of "planned", + // "pending", "guaranteed", "confirmed", "superseded", or "expired". + Status string `json:"status,omitempty"` + // Whether a mail notification should be sent if a created commitment is + // confirmed. Can only be set if the commitment contains a confirmBy value. + NotifyOnConfirm bool `json:"notify_on_confirm"` + + // Data from Keystone + + // The openstack project ID this commitment is for. + ProjectID string `json:"project_id"` + // The openstack domain ID this commitment is for. + DomainID string `json:"domain_id"` + + // Resolved flavor if the commitment is for a specific instance, + // i.e. has the unit instances_. + Flavor *Flavor +} + +// OpenStack flavor model as returned by the Nova API under /flavors/detail. +// See: https://docs.openstack.org/api-ref/compute/#list-flavors +type Flavor struct { + ID string `json:"id"` + Disk int `json:"disk"` // in GB. + RAM int `json:"ram"` // in MB. + Name string `json:"name"` + RxTxFactor float64 `json:"rxtx_factor"` + VCPUs int `json:"vcpus"` + IsPublic bool `json:"os-flavor-access:is_public"` + Ephemeral int `json:"OS-FLV-EXT-DATA:ephemeral"` + Description string `json:"description"` + + // JSON string of extra specifications used when scheduling the flavor. + ExtraSpecs map[string]string `json:"extra_specs" db:"extra_specs"` +} + +// Client to fetch commitments. +type CommitmentsClient interface { + // Init the client. + Init(ctx context.Context) + // Get all commitments with resolved metadata (e.g. project, flavor, ...). + GetComputeCommitments(ctx context.Context) ([]Commitment, error) +} + +// Commitments client fetching commitments from openstack services. +type commitmentsClient struct { + // Basic config to authenticate against openstack. + conf conf.KeystoneConfig + + // Providerclient authenticated against openstack. + provider *gophercloud.ProviderClient + // Keystone service client for OpenStack. + keystone *gophercloud.ServiceClient + // Nova service client for OpenStack. + nova *gophercloud.ServiceClient + // Limes service client for OpenStack. + limes *gophercloud.ServiceClient +} + +// Create a new commitments client. +// By default, this client will fetch commitments from the limes API. +func NewCommitmentsClient(conf conf.KeystoneConfig) CommitmentsClient { + return &commitmentsClient{conf: conf} +} + +// Init the client. 
+func (c *commitmentsClient) Init(ctx context.Context) { + syncLog.Info("authenticating against openstack", "url", c.conf.URL) + auth := keystone.NewKeystoneAPI(c.conf) + must.Succeed(auth.Authenticate(ctx)) + c.provider = auth.Client() + syncLog.Info("authenticated against openstack") + + // Get the keystone endpoint. + url := must.Return(c.provider.EndpointLocator(gophercloud.EndpointOpts{ + Type: "identity", + Availability: "public", + })) + syncLog.Info("using identity endpoint", "url", url) + c.keystone = &gophercloud.ServiceClient{ + ProviderClient: c.provider, + Endpoint: url, + Type: "identity", + } + + // Get the nova endpoint. + url = must.Return(c.provider.EndpointLocator(gophercloud.EndpointOpts{ + Type: "compute", + Availability: "public", + })) + syncLog.Info("using nova endpoint", "url", url) + c.nova = &gophercloud.ServiceClient{ + ProviderClient: c.provider, + Endpoint: url, + Type: "compute", + Microversion: "2.61", + } + + // Get the limes endpoint. + url = must.Return(c.provider.EndpointLocator(gophercloud.EndpointOpts{ + Type: "resources", + Availability: "public", + })) + syncLog.Info("using limes endpoint", "url", url) + c.limes = &gophercloud.ServiceClient{ + ProviderClient: c.provider, + Endpoint: url, + Type: "resources", + } +} + +// Get all Nova flavors by their name to resolve instance commitments. +func (c *commitmentsClient) GetAllFlavors(ctx context.Context) ([]Flavor, error) { + syncLog.Info("fetching all flavors from nova") + flo := flavors.ListOpts{AccessType: flavors.AllAccess} + pages, err := flavors.ListDetail(c.nova, flo).AllPages(ctx) + if err != nil { + return nil, err + } + // Parse the json data into our custom model. + var data = &struct { + Flavors []Flavor `json:"flavors"` + }{} + if err := pages.(flavors.FlavorPage).ExtractInto(data); err != nil { + return nil, err + } + syncLog.Info("fetched flavors from nova", "count", len(data.Flavors)) + return data.Flavors, nil +} + +// Get all projects from Keystone to resolve commitments. +func (c *commitmentsClient) GetAllProjects(ctx context.Context) ([]projects.Project, error) { + syncLog.Info("fetching projects from keystone") + allPages, err := projects.List(c.keystone, nil).AllPages(ctx) + if err != nil { + return nil, err + } + var data = &struct { + Projects []projects.Project `json:"projects"` + }{} + if err := allPages.(projects.ProjectPage).ExtractInto(data); err != nil { + return nil, err + } + syncLog.Info("fetched projects from keystone", "count", len(data.Projects)) + return data.Projects, nil +} + +// Get all available commitments from limes + keystone + nova. +// This function fetches the commitments for each project in parallel. +func (c *commitmentsClient) GetComputeCommitments(ctx context.Context) ([]Commitment, error) { + projects, err := c.GetAllProjects(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get projects: %w", err) + } + syncLog.Info("fetching flavor commitments from limes", "projects", len(projects)) + commitmentsMutex := gosync.Mutex{} + commitments := []Commitment{} + var wg gosync.WaitGroup + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Channel to communicate errors from goroutines. + errChan := make(chan error, len(projects)) + for _, project := range projects { + wg.Add(1) + go func() { + defer wg.Done() + // Fetch instance commitments for the project. + newResults, err := c.getCommitments(ctx, project) + if err != nil { + errChan <- err + cancel() + return + } + commitmentsMutex.Lock() + commitments = append(commitments, newResults...) 
+ commitmentsMutex.Unlock() + }() + time.Sleep(jobloop.DefaultJitter(50 * time.Millisecond)) // Don't overload the API. + } + // Wait for all goroutines to finish and close the error channel. + go func() { + wg.Wait() + close(errChan) + }() + // Return the first error encountered, if any. + for err := range errChan { + if err != nil { + syncLog.Error(err, "failed to resolve commitments") + return nil, err + } + } + syncLog.Info("resolved commitments from limes", "count", len(commitments)) + + flavors, err := c.GetAllFlavors(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get flavors: %w", err) + } + // Resolve the flavor for each commitment. + flavorsByName := make(map[string]Flavor, len(flavors)) + for _, flavor := range flavors { + flavorsByName[flavor.Name] = flavor + } + for i := range commitments { + if !strings.HasPrefix(commitments[i].ResourceName, "instances_") { + // Not an instance commitment. + continue + } + flavorName := strings.TrimPrefix(commitments[i].ResourceName, "instances_") + if flavor, ok := flavorsByName[flavorName]; ok { + commitments[i].Flavor = &flavor + } else { + syncLog.Info("flavor not found for commitment", "flavor", flavorName, "commitment_id", commitments[i].ID) + } + } + return commitments, nil +} + +// Resolve the commitments for the given project. +func (c *commitmentsClient) getCommitments(ctx context.Context, project projects.Project) ([]Commitment, error) { + url := c.limes.Endpoint + "v1" + + "/domains/" + project.DomainID + + "/projects/" + project.ID + + "/commitments" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) + if err != nil { + return nil, err + } + req.Header.Set("X-Auth-Token", c.limes.Token()) + resp, err := c.limes.HTTPClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + var list struct { + Commitments []Commitment `json:"commitments"` + } + err = json.NewDecoder(resp.Body).Decode(&list) + if err != nil { + return nil, err + } + // Add the project information to each commitment. + var commitments []Commitment + for _, c := range list.Commitments { + if c.ServiceType != "compute" { + // Not a compute commitment. + continue + } + c.ProjectID = project.ID + c.DomainID = project.DomainID + commitments = append(commitments, c) + } + return commitments, nil +} + +type Syncer struct { + // Client to fetch commitments. + CommitmentsClient + // Client for the kubernetes API. + client.Client +} + +// Create a new compute reservation syncer. +func NewSyncer(k8sClient client.Client) *Syncer { + config := conf.NewConfig[Config]() + return &Syncer{ + CommitmentsClient: NewCommitmentsClient(config.Keystone), + Client: k8sClient, + } +} + +// Initialize the syncer. +func (s *Syncer) Init(ctx context.Context) { + // Initialize the syncer. + s.CommitmentsClient.Init(ctx) +} + +// Convert a limes unit to a resource quantity. 
+func limesUnitToResource(val int64, unit string) (resource.Quantity, error) { + switch unit { + case "": + return *resource.NewQuantity(val, resource.DecimalSI), nil + case "B": + return *resource.NewQuantity(val, resource.BinarySI), nil + case "KiB": + return *resource.NewQuantity(val*1024, resource.BinarySI), nil + case "MiB": + return *resource.NewQuantity(val*1024*1024, resource.BinarySI), nil + case "GiB": + return *resource.NewQuantity(val*1024*1024*1024, resource.BinarySI), nil + case "TiB": + return *resource.NewQuantity(val*1024*1024*1024*1024, resource.BinarySI), nil + default: + return resource.Quantity{}, fmt.Errorf("unsupported limes unit: %s", unit) + } +} + +// Fetch commitments and update/create reservations for each of them. +func (s *Syncer) SyncReservations(ctx context.Context) error { + computeCommitments, err := s.GetComputeCommitments(ctx) + if err != nil { + return err + } + var reservations []v1alpha1.ComputeReservation + // Instance reservations for each commitment. + for _, commitment := range computeCommitments { + // Get only the 5 first characters from the uuid. This should be safe enough. + if len(commitment.UUID) < 5 { + err := errors.New("commitment UUID is too short") + syncLog.Error(err, "uuid is less than 5 characters", "uuid", commitment.UUID) + continue + } + commitmentUUIDShort := commitment.UUID[:5] + + if commitment.Flavor != nil { + // Flavor (instance) commitment + for n := range commitment.Amount { // N instances + reservations = append(reservations, v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: fmt.Sprintf("commitment-%s-%d", commitmentUUIDShort, n), + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: commitment.ProjectID, + DomainID: commitment.DomainID, + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: commitment.Flavor.Name, + ExtraSpecs: commitment.Flavor.ExtraSpecs, + Memory: *resource.NewQuantity(int64(commitment.Flavor.RAM)*1024*1024, resource.BinarySI), + VCPUs: *resource.NewQuantity(int64(commitment.Flavor.VCPUs), resource.DecimalSI), + Disk: *resource.NewQuantity(int64(commitment.Flavor.Disk)*1024*1024*1024, resource.BinarySI), + }, + }, + }) + } + continue + } + + // Bare resource commitment + reservation := v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: fmt.Sprintf("commitment-%s", commitmentUUIDShort), + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindBareResource, + ProjectID: commitment.ProjectID, + DomainID: commitment.DomainID, + }, + } + quantity, err := limesUnitToResource(int64(commitment.Amount), commitment.Unit) + if err != nil { + syncLog.Error(err, "failed to convert limes unit", "resource name", commitment.ResourceName) + continue + } + switch commitment.ResourceName { + case "cores": + reservation.Spec.BareResource.CPU = quantity + case "ram": + reservation.Spec.BareResource.Memory = quantity + default: + syncLog.Info("unsupported bare resource commitment unit", "resource name", commitment.ResourceName) + continue + } + reservations = append(reservations, reservation) + } + for _, res := range reservations { + // Check if the reservation already exists. 
+ nn := types.NamespacedName{Name: res.Name, Namespace: res.Namespace} + var existing v1alpha1.ComputeReservation + if err := s.Get(ctx, nn, &existing); err != nil { + if !k8serrors.IsNotFound(err) { + syncLog.Error(err, "failed to get reservation", "name", nn.Name) + return err + } + // Reservation does not exist, create it. + if err := s.Create(ctx, &res); err != nil { + return err + } + syncLog.Info("created reservation", "name", nn.Name) + continue + } + // Reservation exists, update it. + existing.Spec = res.Spec + if err := s.Update(ctx, &existing); err != nil { + syncLog.Error(err, "failed to update reservation", "name", nn.Name) + return err + } + syncLog.Info("updated reservation", "name", nn.Name) + } + syncLog.Info("synced reservations", "count", len(reservations)) + return nil +} + +// Run a sync loop for reservations. +func (s *Syncer) Run(ctx context.Context) { + go func() { + for { + if err := s.SyncReservations(ctx); err != nil { + syncLog.Error(err, "failed to sync reservations") + } + time.Sleep(jobloop.DefaultJitter(time.Hour)) + } + }() +} diff --git a/reservations/internal/controller/syncer_test.go b/reservations/internal/controller/syncer_test.go new file mode 100644 index 000000000..9820c7534 --- /dev/null +++ b/reservations/internal/controller/syncer_test.go @@ -0,0 +1,604 @@ +// Copyright 2025 SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + "errors" + "testing" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/cobaltcore-dev/cortex/reservations/api/v1alpha1" +) + +// Mock CommitmentsClient for testing +type mockCommitmentsClient struct { + commitments []Commitment + initCalled bool + shouldError bool +} + +func (m *mockCommitmentsClient) Init(ctx context.Context) { + m.initCalled = true +} + +func (m *mockCommitmentsClient) GetComputeCommitments(ctx context.Context) ([]Commitment, error) { + if m.shouldError { + return nil, errors.New("mock error") + } + return m.commitments, nil +} + +func TestNewSyncer(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + // Create a syncer directly without using NewSyncer to avoid config file dependencies + mockClient := &mockCommitmentsClient{} + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + if syncer.Client != k8sClient { + t.Error("Expected syncer to have the correct k8s client") + } + + if syncer.CommitmentsClient == nil { + t.Error("Expected syncer to have a commitments client") + } +} + +func TestSyncer_Init(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + mockClient := &mockCommitmentsClient{} + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + syncer.Init(context.Background()) + + if !mockClient.initCalled { + t.Error("Expected Init to be called on commitments client") + } +} + +func TestLimesUnitToResource(t *testing.T) { + tests := []struct { + name string + val int64 + unit string + expected string + shouldError bool + }{ + { + name: "no unit", + val: 100, + unit: "", + expected: "100", + }, + { + name: "bytes", + val: 1024, + unit: "B", + expected: "1Ki", + }, + { + name: "KiB", + val: 1, + unit: "KiB", + expected: "1Ki", + }, + { + name: "MiB", + val: 1, + unit: "MiB", + expected: "1Mi", + }, + { + name: "GiB", + val: 1, + unit: "GiB", + expected: "1Gi", + }, + { + name: "TiB", + val: 1, + unit: "TiB", + expected: "1Ti", + }, + { + name: "unsupported unit", + val: 100, + unit: "PiB", + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := limesUnitToResource(tt.val, tt.unit) + + if tt.shouldError { + if err == nil { + t.Error("Expected error but got none") + } + return + } + + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + if result.String() != tt.expected { + t.Errorf("Expected %s, got %s", tt.expected, result.String()) + } + }) + } +} + +func TestSyncer_SyncReservations_InstanceCommitments(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + // Create mock commitments with instance flavors + mockCommitments := []Commitment{ + { + ID: 1, + UUID: "12345-67890-abcdef", + ServiceType: "compute", + ResourceName: "instances_test-flavor", + AvailabilityZone: "az1", + Amount: 2, // 2 instances + Unit: "", + ProjectID: "test-project-1", + DomainID: "test-domain-1", + Flavor: &Flavor{ + ID: "flavor-1", + Name: "test-flavor", + RAM: 1024, // 1GB in MB + VCPUs: 2, + Disk: 10, // 10GB + ExtraSpecs: map[string]string{"key": "value"}, + }, + }, + } + + mockClient := &mockCommitmentsClient{ + commitments: mockCommitments, + } + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + err := syncer.SyncReservations(context.Background()) + if err != nil { + t.Errorf("SyncReservations() error = %v", err) + return + } + + // Verify that reservations were created + var reservations v1alpha1.ComputeReservationList + err = k8sClient.List(context.Background(), &reservations) + if err != nil { + t.Errorf("Failed to list reservations: %v", err) + return + } + + // Should have 2 reservations (Amount = 2) + if len(reservations.Items) != 2 { + t.Errorf("Expected 2 reservations, got %d", len(reservations.Items)) + return + } + + // Verify the first reservation + res := reservations.Items[0] + if res.Spec.Kind != v1alpha1.ComputeReservationSpecKindInstance { + t.Errorf("Expected instance kind, got %v", res.Spec.Kind) + } + + if res.Spec.ProjectID != "test-project-1" { + t.Errorf("Expected project ID test-project-1, got %v", res.Spec.ProjectID) + } + + if res.Spec.Instance.Flavor != "test-flavor" { + t.Errorf("Expected flavor test-flavor, got %v", res.Spec.Instance.Flavor) + } + + // Check resource values + expectedMemory := resource.MustParse("1073741824") // 1024MB in bytes + if !res.Spec.Instance.Memory.Equal(expectedMemory) { + t.Errorf("Expected memory %v, got %v", expectedMemory, res.Spec.Instance.Memory) + } + + expectedVCPUs := 
resource.MustParse("2") + if !res.Spec.Instance.VCPUs.Equal(expectedVCPUs) { + t.Errorf("Expected vCPUs %v, got %v", expectedVCPUs, res.Spec.Instance.VCPUs) + } + + expectedDisk := resource.MustParse("10737418240") // 10GB in bytes + if !res.Spec.Instance.Disk.Equal(expectedDisk) { + t.Errorf("Expected disk %v, got %v", expectedDisk, res.Spec.Instance.Disk) + } +} + +func TestSyncer_SyncReservations_BareResourceCommitments(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + // Create mock commitments with bare resources + mockCommitments := []Commitment{ + { + ID: 2, + UUID: "abcdef-12345-67890", + ServiceType: "compute", + ResourceName: "cores", + AvailabilityZone: "az1", + Amount: 4, + Unit: "", + ProjectID: "test-project-2", + DomainID: "test-domain-2", + }, + { + ID: 3, + UUID: "fedcba-54321-09876", + ServiceType: "compute", + ResourceName: "ram", + AvailabilityZone: "az1", + Amount: 2048, + Unit: "MiB", + ProjectID: "test-project-3", + DomainID: "test-domain-3", + }, + } + + mockClient := &mockCommitmentsClient{ + commitments: mockCommitments, + } + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + err := syncer.SyncReservations(context.Background()) + if err != nil { + t.Errorf("SyncReservations() error = %v", err) + return + } + + // Verify that reservations were created + var reservations v1alpha1.ComputeReservationList + err = k8sClient.List(context.Background(), &reservations) + if err != nil { + t.Errorf("Failed to list reservations: %v", err) + return + } + + // Should have 2 reservations (one for cores, one for ram) + if len(reservations.Items) != 2 { + t.Errorf("Expected 2 reservations, got %d", len(reservations.Items)) + return + } + + // Find the cores reservation + var coresRes *v1alpha1.ComputeReservation + var ramRes *v1alpha1.ComputeReservation + + for i := range reservations.Items { + if reservations.Items[i].Name == "commitment-abcde" { + coresRes = &reservations.Items[i] + } else if reservations.Items[i].Name == "commitment-fedcb" { + ramRes = &reservations.Items[i] + } + } + + if coresRes == nil { + t.Error("Expected to find cores reservation") + return + } + + if ramRes == nil { + t.Error("Expected to find ram reservation") + return + } + + // Verify cores reservation + if coresRes.Spec.Kind != v1alpha1.ComputeReservationSpecKindBareResource { + t.Errorf("Expected bare resource kind, got %v", coresRes.Spec.Kind) + } + + expectedCPU := resource.MustParse("4") + if !coresRes.Spec.BareResource.CPU.Equal(expectedCPU) { + t.Errorf("Expected CPU %v, got %v", expectedCPU, coresRes.Spec.BareResource.CPU) + } + + // Verify ram reservation + expectedMemory := resource.MustParse("2147483648") // 2048 MiB in bytes + if !ramRes.Spec.BareResource.Memory.Equal(expectedMemory) { + t.Errorf("Expected memory %v, got %v", expectedMemory, ramRes.Spec.BareResource.Memory) + } +} + +func TestSyncer_SyncReservations_UpdateExisting(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + // Create an existing reservation + existingReservation := &v1alpha1.ComputeReservation{ + ObjectMeta: ctrl.ObjectMeta{ + Name: "commitment-12345-0", // Instance commitments have -0 suffix + }, + Spec: v1alpha1.ComputeReservationSpec{ + Kind: v1alpha1.ComputeReservationSpecKindInstance, + ProjectID: 
"old-project", + Instance: v1alpha1.ComputeReservationSpecInstance{ + Flavor: "old-flavor", + Memory: resource.MustParse("512Mi"), + VCPUs: resource.MustParse("1"), + Disk: resource.MustParse("5Gi"), + }, + }, + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(existingReservation). + Build() + + // Create mock commitment that should update the existing reservation + mockCommitments := []Commitment{ + { + ID: 1, + UUID: "12345-67890-abcdef", + ServiceType: "compute", + ResourceName: "instances_new-flavor", + AvailabilityZone: "az1", + Amount: 1, + Unit: "", + ProjectID: "new-project", + DomainID: "new-domain", + Flavor: &Flavor{ + ID: "flavor-2", + Name: "new-flavor", + RAM: 2048, // 2GB in MB + VCPUs: 4, + Disk: 20, // 20GB + ExtraSpecs: map[string]string{"new": "spec"}, + }, + }, + } + + mockClient := &mockCommitmentsClient{ + commitments: mockCommitments, + } + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + err := syncer.SyncReservations(context.Background()) + if err != nil { + t.Errorf("SyncReservations() error = %v", err) + return + } + + // Verify that the reservation was updated + var updatedReservation v1alpha1.ComputeReservation + err = k8sClient.Get(context.Background(), client.ObjectKey{Name: "commitment-12345-0"}, &updatedReservation) + if err != nil { + t.Errorf("Failed to get updated reservation: %v", err) + return + } + + // Verify the reservation was updated with new values + if updatedReservation.Spec.ProjectID != "new-project" { + t.Errorf("Expected project ID new-project, got %v", updatedReservation.Spec.ProjectID) + } + + if updatedReservation.Spec.Instance.Flavor != "new-flavor" { + t.Errorf("Expected flavor new-flavor, got %v", updatedReservation.Spec.Instance.Flavor) + } +} + +func TestSyncer_SyncReservations_Error(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + mockClient := &mockCommitmentsClient{ + shouldError: true, + } + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + err := syncer.SyncReservations(context.Background()) + if err == nil { + t.Error("Expected error but got none") + } +} + +func TestSyncer_SyncReservations_ShortUUID(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + // Create mock commitment with short UUID (should be skipped) + mockCommitments := []Commitment{ + { + ID: 1, + UUID: "123", // Too short + ServiceType: "compute", + ResourceName: "instances_test-flavor", + AvailabilityZone: "az1", + Amount: 1, + Unit: "", + ProjectID: "test-project", + DomainID: "test-domain", + Flavor: &Flavor{ + Name: "test-flavor", + }, + }, + } + + mockClient := &mockCommitmentsClient{ + commitments: mockCommitments, + } + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + err := syncer.SyncReservations(context.Background()) + if err != nil { + t.Errorf("SyncReservations() error = %v", err) + return + } + + // Verify that no reservations were created due to short UUID + var reservations v1alpha1.ComputeReservationList + err = k8sClient.List(context.Background(), &reservations) + if err != nil { + t.Errorf("Failed to list reservations: %v", err) + return + } + + if len(reservations.Items) != 0 { + t.Errorf("Expected 0 reservations due to short UUID, got %d", len(reservations.Items)) + } +} + +func TestSyncer_SyncReservations_UnsupportedResource(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + // Create mock commitment with unsupported resource name + mockCommitments := []Commitment{ + { + ID: 1, + UUID: "12345-67890-abcdef", + ServiceType: "compute", + ResourceName: "unsupported_resource", + AvailabilityZone: "az1", + Amount: 1, + Unit: "", + ProjectID: "test-project", + DomainID: "test-domain", + }, + } + + mockClient := &mockCommitmentsClient{ + commitments: mockCommitments, + } + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + err := syncer.SyncReservations(context.Background()) + if err != nil { + t.Errorf("SyncReservations() error = %v", err) + return + } + + // Verify that no reservations were created due to unsupported resource + var reservations v1alpha1.ComputeReservationList + err = k8sClient.List(context.Background(), &reservations) + if err != nil { + t.Errorf("Failed to list reservations: %v", err) + return + } + + if len(reservations.Items) != 0 { + t.Errorf("Expected 0 reservations due to unsupported resource, got %d", len(reservations.Items)) + } +} + +// Note: The Run method starts a goroutine and runs indefinitely, so it's difficult to test +// in a unit test without complex synchronization. In a real-world scenario, you might +// want to add a context cancellation mechanism or a way to stop the sync loop for testing. +func TestSyncer_Run(t *testing.T) { + scheme := runtime.NewScheme() + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add scheme: %v", err) + } + + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + mockClient := &mockCommitmentsClient{} + + syncer := &Syncer{ + CommitmentsClient: mockClient, + Client: k8sClient, + } + + // Test that Run doesn't panic when called + // We can't easily test the actual loop behavior without complex timing + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately to avoid infinite loop + + // This should not panic + syncer.Run(ctx) +} From b7cf62ef95ed225f8c6077163628514db4bdff22 Mon Sep 17 00:00:00 2001 From: Philipp Matthes Date: Wed, 27 Aug 2025 12:59:09 +0200 Subject: [PATCH 2/3] Fix workflow: read reservations coverage from correct path --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 4f0dbdc0c..2f4629217 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -114,7 +114,7 @@ jobs: let reservationsCoverageReport = ''; let reservationsCoveragePercentage = 'unknown'; try { - reservationsCoverageReport = fs.readFileSync('reservations_func_coverage.txt', 'utf8'); + reservationsCoverageReport = fs.readFileSync('reservations/reservations_func_coverage.txt', 'utf8'); const reservationsLines = reservationsCoverageReport.trim().split('\n'); const reservationsLastLine = reservationsLines[reservationsLines.length - 1]; const reservationsCoverageMatch = reservationsLastLine.match(/total:\s+\(statements\)\s+(\d+\.\d+)%/); From 1ff686648958759a2b1ab0e4c8c435d19e153305 Mon Sep 17 00:00:00 2001 From: "Dr. Philipp Matthes" <27271818+PhilippMatthes@users.noreply.github.com> Date: Wed, 27 Aug 2025 15:09:47 +0200 Subject: [PATCH 3/3] Update cortex.secrets.example.yaml Co-authored-by: Markus Wieland <44964229+SoWieMarkus@users.noreply.github.com> --- cortex.secrets.example.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cortex.secrets.example.yaml b/cortex.secrets.example.yaml index 7c6aa6438..bdbb49056 100644 --- a/cortex.secrets.example.yaml +++ b/cortex.secrets.example.yaml @@ -21,7 +21,7 @@ sharedSSOCert: &sharedSSOCert selfSigned: false # Shared keystone credentials to use. -keystone: &keystone: +keystone: &keystone url: https://path-to-keystone/v3 sso: *sharedSSOCert username: openstack-user-with-all-project-read-access
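
Two small standalone sketches of the conversions introduced above (these are not part of the patch; helper names such as limesUnitBytes are illustrative only).

First, the limes-unit handling in reservations/internal/controller/syncer.go: limesUnitToResource maps a commitment amount plus its limes unit onto a Kubernetes resource.Quantity using binary multipliers. A minimal sketch, assuming the same multipliers as the switch in that function:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // Binary multipliers mirroring the switch cases in limesUnitToResource.
    var limesUnitBytes = map[string]int64{
        "B":   1,
        "KiB": 1 << 10,
        "MiB": 1 << 20,
        "GiB": 1 << 30,
        "TiB": 1 << 40,
    }

    func main() {
        // A "ram" commitment of 2048 MiB becomes a 2Gi quantity, matching the value
        // expected in TestSyncer_SyncReservations_BareResourceCommitments (2147483648 bytes).
        ram := resource.NewQuantity(2048*limesUnitBytes["MiB"], resource.BinarySI)
        fmt.Println(ram.String()) // 2Gi

        // A "cores" commitment carries no unit and stays a plain decimal count.
        cores := resource.NewQuantity(4, resource.DecimalSI)
        fmt.Println(cores.String()) // 4
    }

Second, the label handling in reservations/internal/controller/monitor.go: Collect aggregates reservations by joining the label values into one comma-separated map key, so commas inside the free-form error string are first replaced with semicolons; otherwise the later strings.Split would yield too many label values for WithLabelValues. This is the behavior exercised by TestMonitor_Collect_LabelSanitization. A minimal sketch of that round trip, assuming the same three labels as cortex_reservations_number:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        phase, errMsg, kind := "failed", "error with, commas, in it", "instance"

        // Encode: sanitize the free-form error, then join the label values into one key.
        key := phase + "," + strings.ReplaceAll(errMsg, ",", ";") + "," + kind

        // Decode: split back into exactly three label values for WithLabelValues.
        labelValues := strings.Split(key, ",")
        fmt.Println(len(labelValues)) // 3
        fmt.Println(labelValues[1])   // error with; commas; in it
    }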