diff --git a/.golangci.yml b/.golangci.yml index 48fb6a144b6..7d87bbc228d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -107,6 +107,26 @@ issues: - path: mongo_scaler.go linters: - dupl + # Exclude gci check for //+kubebuilder:scaffold:imports comments. Waiting to + # resolve https://github.com/kedacore/keda/issues/4379 + - path: cmd/operator/main.go + linters: + - gci + - path: cmd/webhooks/main.go + linters: + - gci + - path: controllers/keda/suite_test.go + linters: + - gci + - path: apis/keda/v1alpha1/scaledobject_webhook_test.go + linters: + - gci + # Exclude for azure_pipelines_scaler, reason: + # pkg/scalers/azure_pipelines_scaler.go:408:10: badCond: `countDemands == len(demandsInJob) && countDemands == len(demandsInScaler)` condition is suspicious (gocritic) + - path: azure_pipelines_scaler.go + linters: + - gocritic + linters-settings: funlen: diff --git a/BUILD.md b/BUILD.md index e417655c7a6..28f798891d8 100644 --- a/BUILD.md +++ b/BUILD.md @@ -287,7 +287,8 @@ Follow these instructions if you want to debug the KEDA webhook using VS Code. clientConfig: url: "https://${YOUR_URL}/validate-keda-sh-v1alpha1-scaledobject" ``` - **Note:** You could need to define also the key `caBundle` with the CA bundle encoded in base64 if the cluster can get it during the manifest apply (this happens with localtunnel for instance) + **Note:** You also need to define the key `caBundle` with the CA bundle encoded in base64. This `caBundle` is the pem file from the CA used to sign the certificate. Remember to disable the `caBundle` injection to avoid unintended rewrites of your `caBundle` (by KEDA operator or by any other 3rd party) + 4. 
Deploy CRDs and KEDA into `keda` namespace ```bash diff --git a/CHANGELOG.md b/CHANGELOG.md index 806c96b7704..7b99d8a895b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ## History - [Unreleased](#unreleased) +- [v2.10.1](#v2101) - [v2.10.0](#v2100) - [v2.9.3](#v293) - [v2.9.2](#v292) @@ -67,6 +68,20 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX)) +## v2.10.1 + +### Fixes + +- **Admission Webhooks**: Allow to remove the finalizer even if the ScaledObject isn't valid ([#4396](https://github.com/kedacore/keda/issues/4396)) +- **AWS SQS Scaler**: Respect `scaleOnInFlight` value ([#4276](https://github.com/kedacore/keda/issues/4276)) +- **Azure Pipelines**: Fix for disallowing `$top` on query when using `meta.parentID` method ([#4397](https://github.com/kedacore/keda/issues/4397)) +- **Azure Pipelines**: Respect all required demands ([#4404](https://github.com/kedacore/keda/issues/4404)) + +### Other + +- **General**: Drop a transitive dependency on bou.ke/monkey ([#4364](https://github.com/kedacore/keda/issues/4364)) +- **General**: Fix odd number of arguments passed as key-value pairs for logging ([#4368](https://github.com/kedacore/keda/issues/4368)) + ## v2.10.0 ### New diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go index 3f6177db06a..a811b01411f 100644 --- a/apis/keda/v1alpha1/scaledobject_webhook.go +++ b/apis/keda/v1alpha1/scaledobject_webhook.go @@ -62,6 +62,12 @@ func (so *ScaledObject) ValidateCreate() error { func (so *ScaledObject) ValidateUpdate(old runtime.Object) error { val, _ := json.MarshalIndent(so, "", " ") scaledobjectlog.V(1).Info(fmt.Sprintf("validating scaledobject update for %s", string(val))) + + if isRemovingFinalizer(so, old) { + scaledobjectlog.V(1).Info("finalizer removal, skipping validation") + return nil + } + return validateWorkload(so, 
"update") } @@ -69,6 +75,17 @@ func (so *ScaledObject) ValidateDelete() error { return nil } +func isRemovingFinalizer(so *ScaledObject, old runtime.Object) bool { + oldSo := old.(*ScaledObject) + + soSpec, _ := json.MarshalIndent(so.Spec, "", " ") + oldSoSpec, _ := json.MarshalIndent(oldSo.Spec, "", " ") + soSpecString := string(soSpec) + oldSoSpecString := string(oldSoSpec) + + return len(so.ObjectMeta.Finalizers) == 0 && len(oldSo.ObjectMeta.Finalizers) == 1 && soSpecString == oldSoSpecString +} + func validateWorkload(so *ScaledObject, action string) error { prommetrics.RecordScaledObjectValidatingTotal(so.Namespace, action) err := verifyCPUMemoryScalers(so, action) diff --git a/apis/keda/v1alpha1/scaledobject_webhook_test.go b/apis/keda/v1alpha1/scaledobject_webhook_test.go index 9d2ad532f3b..7113dadf0f1 100644 --- a/apis/keda/v1alpha1/scaledobject_webhook_test.go +++ b/apis/keda/v1alpha1/scaledobject_webhook_test.go @@ -154,8 +154,9 @@ var _ = It("should validate the so creation when there isn't any hpa", func() { err := k8sClient.Create(context.Background(), namespace) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = It("should validate the so creation when there are other SO for other workloads", func() { @@ -171,8 +172,9 @@ var _ = It("should validate the so creation when there are other SO for other wo err = k8sClient.Create(context.Background(), so1) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so2) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so2) + }).ShouldNot(HaveOccurred()) }) var _ = It("should validate the so creation when there are other HPA for other workloads", func() { @@ -188,8 +190,9 @@ var _ = It("should validate the so creation when there are other 
HPA for other w err = k8sClient.Create(context.Background(), hpa) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = It("should validate the so creation when it's own hpa is already generated", func() { @@ -206,8 +209,9 @@ var _ = It("should validate the so creation when it's own hpa is already generat err = k8sClient.Create(context.Background(), hpa) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = It("should validate the so update when it's own hpa is already generated", func() { @@ -228,8 +232,9 @@ var _ = It("should validate the so update when it's own hpa is already generated Expect(err).ToNot(HaveOccurred()) so.Spec.MaxReplicaCount = pointer.Int32(7) - err = k8sClient.Update(context.Background(), so) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Update(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = It("shouldn't validate the so creation when there is another unmanaged hpa", func() { @@ -246,8 +251,9 @@ var _ = It("shouldn't validate the so creation when there is another unmanaged h err = k8sClient.Create(context.Background(), hpa) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("shouldn't validate the so creation when there is another so", func() { @@ -264,8 +270,9 @@ var _ = It("shouldn't validate the so creation when there is another so", func() err = k8sClient.Create(context.Background(), so2) Expect(err).ToNot(HaveOccurred()) - err 
= k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("shouldn't validate the so creation when there is another hpa with custom apis", func() { @@ -282,8 +289,9 @@ var _ = It("shouldn't validate the so creation when there is another hpa with cu err = k8sClient.Create(context.Background(), hpa) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("should validate the so creation with cpu and memory when deployment has requests", func() { @@ -299,8 +307,9 @@ var _ = It("should validate the so creation with cpu and memory when deployment err = k8sClient.Create(context.Background(), workload) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = It("shouldn't validate the so creation with cpu and memory when deployment hasn't got memory request", func() { @@ -316,8 +325,9 @@ var _ = It("shouldn't validate the so creation with cpu and memory when deployme err = k8sClient.Create(context.Background(), workload) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("shouldn't validate the so creation with cpu and memory when deployment hasn't got cpu request", func() { @@ -333,8 +343,9 @@ var _ = It("shouldn't validate the so creation with cpu and memory when deployme err = k8sClient.Create(context.Background(), workload) Expect(err).ToNot(HaveOccurred()) - err = 
k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("should validate the so creation with cpu and memory when statefulset has requests", func() { @@ -350,8 +361,9 @@ var _ = It("should validate the so creation with cpu and memory when statefulset err = k8sClient.Create(context.Background(), workload) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = It("shouldn't validate the so creation with cpu and memory when statefulset hasn't got memory request", func() { @@ -367,8 +379,9 @@ var _ = It("shouldn't validate the so creation with cpu and memory when stateful err = k8sClient.Create(context.Background(), workload) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("shouldn't validate the so creation with cpu and memory when statefulset hasn't got cpu request", func() { @@ -384,8 +397,9 @@ var _ = It("shouldn't validate the so creation with cpu and memory when stateful err = k8sClient.Create(context.Background(), workload) Expect(err).ToNot(HaveOccurred()) - err = k8sClient.Create(context.Background(), so) - Expect(err).To(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) }) var _ = It("should validate the so creation without cpu and memory when custom resources", func() { @@ -397,8 +411,88 @@ var _ = It("should validate the so creation without cpu and memory when custom r err := k8sClient.Create(context.Background(), namespace) Expect(err).ToNot(HaveOccurred()) - err = 
k8sClient.Create(context.Background(), so) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("should validate so creation when all requirements are met for scaling to zero with cpu scaler", func() { + namespaceName := "scale-to-zero-good" + namespace := createNamespace(namespaceName) + workload := createDeployment(namespaceName, true, false) + + so := createScaledObjectSTZ(soName, namespaceName, workloadName, 0, 5, true) + + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + err = k8sClient.Create(context.Background(), workload) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("shouldn't validate so creation with cpu scaler requirements not being met for scaling to 0", func() { + namespaceName := "scale-to-zero-min-replicas-bad" + namespace := createNamespace(namespaceName) + workload := createDeployment(namespaceName, true, false) + + so := createScaledObjectSTZ(soName, namespaceName, workloadName, 0, 5, false) + + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + err = k8sClient.Create(context.Background(), workload) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) +}) + +var _ = It("should validate so creation when min replicas is > 0 with only cpu scaler given", func() { + namespaceName := "scale-to-zero-no-external-trigger-good" + namespace := createNamespace(namespaceName) + workload := createDeployment(namespaceName, true, false) + + so := createScaledObjectSTZ(soName, namespaceName, workloadName, 1, 5, false) + + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + err = k8sClient.Create(context.Background(), workload) + 
Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) + +}) + +var _ = It("should validate the so update if it's removing the finalizer even if it's invalid", func() { + + namespaceName := "removing-finalizers" + namespace := createNamespace(namespaceName) + workload := createDeployment(namespaceName, true, true) + so := createScaledObject(soName, namespaceName, workloadName, "apps/v1", "Deployment", true) + so.ObjectMeta.Finalizers = append(so.ObjectMeta.Finalizers, "finalizer") + + err := k8sClient.Create(context.Background(), namespace) Expect(err).ToNot(HaveOccurred()) + + err = k8sClient.Create(context.Background(), workload) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).ShouldNot(HaveOccurred()) + + workload.Spec.Template.Spec.Containers[0].Resources.Requests = nil + err = k8sClient.Update(context.Background(), workload) + Expect(err).ToNot(HaveOccurred()) + + so.ObjectMeta.Finalizers = []string{} + Eventually(func() error { + return k8sClient.Update(context.Background(), so) + }).ShouldNot(HaveOccurred()) }) var _ = AfterSuite(func() { @@ -595,3 +689,44 @@ func createStatefulSet(namespace string, hasCPU, hasMemory bool) *appsv1.Statefu }, } } + +func createScaledObjectSTZ(name string, namespace string, targetName string, minReplicas int32, maxReplicas int32, hasExternalTrigger bool) *ScaledObject { + triggers := []ScaleTriggers{ + { + Type: "cpu", + Metadata: map[string]string{ + "value": "10", + }, + }, + } + if hasExternalTrigger { + kubeWorkloadTrigger := ScaleTriggers{ + Type: "kubernetes-workload", + Metadata: map[string]string{ + "podSelector": "pod=workload-test", + "value": "1", + }, + } + triggers = append(triggers, kubeWorkloadTrigger) + } + return &ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID(name), + }, + TypeMeta: 
metav1.TypeMeta{ + Kind: "ScaledObject", + APIVersion: "keda.sh", + }, + Spec: ScaledObjectSpec{ + ScaleTargetRef: &ScaleTarget{ + Name: targetName, + }, + MinReplicaCount: pointer.Int32(minReplicas), + MaxReplicaCount: pointer.Int32(maxReplicas), + CooldownPeriod: pointer.Int32(1), + Triggers: triggers, + }, + } +} diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index 522fb83bd23..3a293fef371 100644 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -220,7 +220,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg return "ScaledObject doesn't have correct Idle/Min/Max Replica Counts specification", err } - err = r.checkTriggers(scaledObject) + err = r.checkTriggers(logger, scaledObject) if err != nil { return "ScaledObject doesn't have correct triggers specification", err } @@ -337,7 +337,7 @@ func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Conte // checkTriggers checks that general trigger metadata are valid, it checks: // - triggerNames in ScaledObject are unique // - useCachedMetrics is defined only for a supported triggers -func (r *ScaledObjectReconciler) checkTriggers(scaledObject *kedav1alpha1.ScaledObject) error { +func (r *ScaledObjectReconciler) checkTriggers(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error { triggersCount := len(scaledObject.Spec.Triggers) if triggersCount > 1 { @@ -355,14 +355,14 @@ func (r *ScaledObjectReconciler) checkTriggers(scaledObject *kedav1alpha1.Scaled _, hasMetricName := trigger.Metadata["metricName"] // aws-cloudwatch and huawei-cloudeye have a meaningful use of metricName if hasMetricName && trigger.Type != "aws-cloudwatch" && trigger.Type != "huawei-cloudeye" { - log.Log.Info("metricName is deprecated and will be removed in v2.12, please do not set it anymore (used in %q)", trigger.Type) + logger.Info("\"metricName\" is deprecated and will 
be removed in v2.12, please do not set it anymore", "trigger.type", trigger.Type) } name := trigger.Name if name != "" { if _, found := triggerNames[name]; found { // found duplicate name - return fmt.Errorf("triggerName=%s is defined multiple times in the ScaledObject, but it must be unique", name) + return fmt.Errorf("triggerName %q is defined multiple times in the ScaledObject, but it must be unique", name) } triggerNames[name] = true } diff --git a/go.mod b/go.mod index f2e7aa23a59..b105a6dd135 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/gocql/gocql v1.3.1 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 + github.com/google/go-github/v50 v50.1.0 github.com/google/uuid v1.3.0 github.com/gophercloud/gophercloud v1.2.0 github.com/hashicorp/vault/api v1.9.0 @@ -98,6 +99,9 @@ replace ( // https://nvd.nist.gov/vuln/detail/CVE-2022-1996 github.com/emicklei/go-restful => github.com/emicklei/go-restful v2.16.0+incompatible + // we need to drop a transitive dependency on bou.ke/monkey -> https://github.com/kedacore/keda/issues/4364 + github.com/otiai10/mint => github.com/otiai10/mint v1.4.1 + // https://avd.aquasec.com/nvd/2022/cve-2022-27191/ golang.org/x/crypto => golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 @@ -176,7 +180,6 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/cel-go v0.13.0 // indirect github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-github/v50 v50.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 // indirect diff --git a/go.sum b/go.sum index b74733895a3..2ac26aedcf9 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,3 @@ -bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -628,12 +627,9 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/otiai10/mint v1.4.1/go.mod h1:gifjb2MYOoULtKLqUAEILUG/9KONW6f7YsJ6vQLTlFI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go index ae5f153ef87..be1c50cb87e 100644 --- a/pkg/scalers/aws_sqs_queue_scaler.go +++ b/pkg/scalers/aws_sqs_queue_scaler.go @@ -23,11 +23,15 @@ const ( defaultScaleOnInFlight = true ) -var awsSqsQueueMetricNames = []string{ +var awsSqsQueueMetricNamesForScalingInFlight = []string{ "ApproximateNumberOfMessages", "ApproximateNumberOfMessagesNotVisible", } +var awsSqsQueueMetricNamesForNotScalingInFlight = []string{ + "ApproximateNumberOfMessages", +} + 
type awsSqsQueueScaler struct { metricType v2.MetricTargetType metadata *awsSqsQueueMetadata @@ -45,6 +49,7 @@ type awsSqsQueueMetadata struct { awsAuthorization awsAuthorizationMetadata scalerIndex int scaleOnInFlight bool + awsSqsQueueMetricNames []string } // NewAwsSqsQueueScaler creates a new awsSqsQueueScaler @@ -104,10 +109,10 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqs } } - if !meta.scaleOnInFlight { - awsSqsQueueMetricNames = []string{ - "ApproximateNumberOfMessages", - } + if meta.scaleOnInFlight { + meta.awsSqsQueueMetricNames = awsSqsQueueMetricNamesForScalingInFlight + } else { + meta.awsSqsQueueMetricNames = awsSqsQueueMetricNamesForNotScalingInFlight } if val, ok := config.TriggerMetadata["queueURL"]; ok && val != "" { @@ -198,7 +203,7 @@ func (s *awsSqsQueueScaler) GetMetricsAndActivity(ctx context.Context, metricNam // Get SQS Queue Length func (s *awsSqsQueueScaler) getAwsSqsQueueLength() (int64, error) { input := &sqs.GetQueueAttributesInput{ - AttributeNames: aws.StringSlice(awsSqsQueueMetricNames), + AttributeNames: aws.StringSlice(s.metadata.awsSqsQueueMetricNames), QueueUrl: aws.String(s.metadata.queueURL), } @@ -208,7 +213,7 @@ func (s *awsSqsQueueScaler) getAwsSqsQueueLength() (int64, error) { } var approximateNumberOfMessages int64 - for _, awsSqsQueueMetric := range awsSqsQueueMetricNames { + for _, awsSqsQueueMetric := range s.metadata.awsSqsQueueMetricNames { metricValue, err := strconv.ParseInt(*output.Attributes[awsSqsQueueMetric], 10, 32) if err != nil { return -1, err diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go index 8f00bfeed7c..fc7e4cdd971 100644 --- a/pkg/scalers/aws_sqs_queue_scaler_test.go +++ b/pkg/scalers/aws_sqs_queue_scaler_test.go @@ -307,10 +307,43 @@ var awsSQSMetricIdentifiers = []awsSQSMetricIdentifier{ {&testAWSSQSMetadata[1], 1, "s1-aws-sqs-DeleteArtifactQ"}, } -var awsSQSGetMetricTestData = []*awsSqsQueueMetadata{ - 
{queueURL: testAWSSQSProperQueueURL}, - {queueURL: testAWSSQSErrorQueueURL}, - {queueURL: testAWSSQSBadDataQueueURL}, +var awsSQSGetMetricTestData = []*parseAWSSQSMetadataTestData{ + {map[string]string{ + "queueURL": testAWSSQSProperQueueURL, + "queueLength": "1", + "awsRegion": "eu-west-1", + "scaleOnInFlight": "false"}, + testAWSSQSAuthentication, + testAWSSQSEmptyResolvedEnv, + false, + "not error with scaleOnInFlight disabled"}, + {map[string]string{ + "queueURL": testAWSSQSProperQueueURL, + "queueLength": "1", + "awsRegion": "eu-west-1", + "scaleOnInFlight": "true"}, + testAWSSQSAuthentication, + testAWSSQSEmptyResolvedEnv, + false, + "not error with scaleOnInFlight enabled"}, + {map[string]string{ + "queueURL": testAWSSQSErrorQueueURL, + "queueLength": "1", + "awsRegion": "eu-west-1", + "scaleOnInFlight": "false"}, + testAWSSQSAuthentication, + testAWSSQSEmptyResolvedEnv, + false, + "error queue"}, + {map[string]string{ + "queueURL": testAWSSQSBadDataQueueURL, + "queueLength": "1", + "awsRegion": "eu-west-1", + "scaleOnInFlight": "true"}, + testAWSSQSAuthentication, + testAWSSQSEmptyResolvedEnv, + false, + "bad data"}, } func TestSQSParseMetadata(t *testing.T) { @@ -343,8 +376,13 @@ func TestAWSSQSGetMetricSpecForScaling(t *testing.T) { } func TestAWSSQSScalerGetMetrics(t *testing.T) { - for _, meta := range awsSQSGetMetricTestData { + for index, testData := range awsSQSGetMetricTestData { + meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams, ScalerIndex: index}, logr.Discard()) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } scaler := awsSqsQueueScaler{"", meta, &mockSqs{}, logr.Discard()} + value, _, err := scaler.GetMetricsAndActivity(context.Background(), "MetricName") switch meta.queueURL { case testAWSSQSErrorQueueURL: diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go index 
ed9c3af7c6d..9b2dfa7cbb1 100644 --- a/pkg/scalers/azure_pipelines_scaler.go +++ b/pkg/scalers/azure_pipelines_scaler.go @@ -332,7 +332,13 @@ func getAzurePipelineRequest(ctx context.Context, url string, metadata *azurePip } func (s *azurePipelinesScaler) GetAzurePipelinesQueueLength(ctx context.Context) (int64, error) { - url := fmt.Sprintf("%s/_apis/distributedtask/pools/%d/jobrequests?$top=%d", s.metadata.organizationURL, s.metadata.poolID, s.metadata.jobsToFetch) + // HotFix Issue (#4387), $top changes the format of the returned JSON + var url string + if s.metadata.parent != "" { + url = fmt.Sprintf("%s/_apis/distributedtask/pools/%d/jobrequests", s.metadata.organizationURL, s.metadata.poolID) + } else { + url = fmt.Sprintf("%s/_apis/distributedtask/pools/%d/jobrequests?$top=%d", s.metadata.organizationURL, s.metadata.poolID, s.metadata.jobsToFetch) + } body, err := getAzurePipelineRequest(ctx, url, s.metadata, s.httpClient) if err != nil { return -1, err @@ -379,26 +385,35 @@ func stripDeadJobs(jobs []JobRequest) []JobRequest { return filtered } +func stripAgentVFromArray(array []string) []string { + var result []string + + for _, item := range array { + if !strings.HasPrefix(item, "Agent.Version") { + result = append(result, item) + } + } + return result +} + // Determine if the scaledjob has the right demands to spin up func getCanAgentDemandFulfilJob(jr JobRequest, metadata *azurePipelinesMetadata) bool { - var demandsReq = jr.Demands - var demandsAvail = strings.Split(metadata.demands, ",") - var countDemands = 0 - for _, dr := range demandsReq { - for _, da := range demandsAvail { - strDr := fmt.Sprintf("%v", dr) - if !strings.HasPrefix(strDr, "Agent.Version") { - if strDr == da { - countDemands++ - } + countDemands := 0 + demandsInJob := stripAgentVFromArray(jr.Demands) + demandsInScaler := stripAgentVFromArray(strings.Split(metadata.demands, ",")) + + for _, demandInJob := range demandsInJob { + for _, demandInScaler := range demandsInScaler { + if 
demandInJob == demandInScaler { + countDemands++ } } } - matchDemands := countDemands == len(demandsReq)-1 + if metadata.requireAllDemands { - return matchDemands && countDemands == len(demandsAvail) + return countDemands == len(demandsInJob) && countDemands == len(demandsInScaler) } - return matchDemands + return countDemands == len(demandsInJob) } // Determine if the Job and Parent Agent Template have matching capabilities diff --git a/pkg/util/welcome.go b/pkg/util/welcome.go index f6f2bb203da..774d69730eb 100644 --- a/pkg/util/welcome.go +++ b/pkg/util/welcome.go @@ -26,7 +26,7 @@ import ( ) const ( - minSupportedVersion = 23 + minSupportedVersion = 24 maxSupportedVersion = 26 ) diff --git a/tests/.env b/tests/.env index c6adc20e638..46c32827cb6 100644 --- a/tests/.env +++ b/tests/.env @@ -9,9 +9,11 @@ TF_AZURE_APP_INSIGHTS_NAME= TF_AZURE_DATA_EXPLORER_DB= TF_AZURE_DATA_EXPLORER_ENDPOINT= AZURE_DEVOPS_BUILD_DEFINITION_ID= +AZURE_DEVOPS_DEMAND_PARENT_BUILD_DEFINITION_ID= AZURE_DEVOPS_ORGANIZATION_URL= AZURE_DEVOPS_PAT= AZURE_DEVOPS_POOL_NAME= +AZURE_DEVOPS_DEMAND_POOL_NAME= AZURE_DEVOPS_PROJECT= TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING= TF_AZURE_KEYVAULT_URI= diff --git a/tests/scalers/azure/azure_pipelines_adv/azure_pipelines_adv_test.go b/tests/scalers/azure/azure_pipelines_adv/azure_pipelines_adv_test.go new file mode 100644 index 00000000000..2a0efd8a398 --- /dev/null +++ b/tests/scalers/azure/azure_pipelines_adv/azure_pipelines_adv_test.go @@ -0,0 +1,554 @@ +//go:build e2e +// +build e2e + +package azure_pipelines_adv_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/joho/godotenv" + "github.com/microsoft/azure-devops-go-api/azuredevops" + "github.com/microsoft/azure-devops-go-api/azuredevops/build" + "github.com/microsoft/azure-devops-go-api/azuredevops/taskagent" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + 
+ . "github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "azure-pipelines-demands-test" +) + +var ( + organizationURL = os.Getenv("AZURE_DEVOPS_ORGANIZATION_URL") + personalAccessToken = os.Getenv("AZURE_DEVOPS_PAT") + project = os.Getenv("AZURE_DEVOPS_PROJECT") + demandParentBuildID = os.Getenv("AZURE_DEVOPS_DEMAND_PARENT_BUILD_DEFINITION_ID") + poolName = os.Getenv("AZURE_DEVOPS_DEMAND_POOL_NAME") + poolID = "0" + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + scaledJobName = fmt.Sprintf("%s-sj", testName) + minReplicaCount = 0 + maxReplicaCount = 1 +) + +type templateData struct { + TestNamespace string + SecretName string + DeploymentName string + ScaledObjectName string + ScaledJobName string + MinReplicaCount string + MaxReplicaCount string + Pat string + URL string + PoolName string + PoolID string + SeedType string +} + +const ( + secretTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + personalAccessToken: {{.Pat}} +` + seedDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.SeedType}}-template + namespace: {{.TestNamespace}} + labels: + app: azdevops-agent +spec: + replicas: 1 + selector: + matchLabels: + app: azdevops-agent + template: + metadata: + labels: + app: azdevops-agent + spec: + terminationGracePeriodSeconds: 90 + containers: + - name: azdevops-agent + lifecycle: + preStop: + exec: + command: ["/bin/sleep","60"] + image: eldarrin/azure:main + env: + - name: AZP_URL + value: {{.URL}} + - name: AZP_TOKEN + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: personalAccessToken + - name: AZP_POOL + value: {{.PoolName}} + - name: AZP_AGENT_NAME + value: {{.SeedType}}-template + - 
name: {{.SeedType}} + value: "true" +` + demandScaledJobTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.SeedType}}-agent-demand-sj + namespace: {{.TestNamespace}} +spec: + jobTargetRef: + template: + metadata: + labels: + app: {{.ScaledJobName}} + spec: + containers: + - name: {{.ScaledJobName}} + image: eldarrin/azure:main + env: + - name: AZP_URL + value: {{.URL}} + - name: AZP_TOKEN + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: personalAccessToken + - name: AZP_POOL + value: {{.PoolName}} + - name: {{.SeedType}} + value: "true" + restartPolicy: Never + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 15 + triggers: + - type: azure-pipelines + metadata: + organizationURLFromEnv: "AZP_URL" + personalAccessTokenFromEnv: "AZP_TOKEN" + poolName: "{{.PoolName}}" + demands: "{{.SeedType}}" +` + demandRequireAllScaledJobTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.SeedType}}-alldemand-sj + namespace: {{.TestNamespace}} +spec: + jobTargetRef: + template: + metadata: + labels: + app: {{.ScaledJobName}} + spec: + containers: + - name: {{.ScaledJobName}} + image: eldarrin/azure:main + env: + - name: AZP_URL + value: {{.URL}} + - name: AZP_TOKEN + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: personalAccessToken + - name: AZP_POOL + value: {{.PoolName}} + - name: {{.SeedType}} + value: "true" + restartPolicy: Never + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 15 + triggers: + - type: azure-pipelines + metadata: + organizationURLFromEnv: "AZP_URL" + personalAccessTokenFromEnv: "AZP_TOKEN" + poolName: "{{.PoolName}}" + demands: "{{.SeedType}}" + requireAllDemands: "true" +` + + parentScaledJobTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.SeedType}}-parent-sj + namespace: {{.TestNamespace}} +spec: + jobTargetRef: + template: + metadata: + labels: 
+ app: {{.ScaledJobName}} + spec: + containers: + - name: {{.ScaledJobName}} + image: eldarrin/azure:main + env: + - name: AZP_URL + value: {{.URL}} + - name: AZP_TOKEN + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: personalAccessToken + - name: AZP_POOL + value: {{.PoolName}} + - name: {{.SeedType}} + value: "true" + restartPolicy: Never + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 15 + triggers: + - type: azure-pipelines + metadata: + organizationURLFromEnv: "AZP_URL" + personalAccessTokenFromEnv: "AZP_TOKEN" + poolName: "{{.PoolName}}" + parent: {{.SeedType}}-template +` + anyScaledJobTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.SeedType}}-any-sj + namespace: {{.TestNamespace}} +spec: + jobTargetRef: + template: + metadata: + labels: + app: {{.ScaledJobName}} + spec: + containers: + - name: {{.ScaledJobName}} + image: eldarrin/azure:main + env: + - name: AZP_URL + value: {{.URL}} + - name: AZP_TOKEN + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: personalAccessToken + - name: AZP_POOL + value: {{.PoolName}} + - name: {{.SeedType}} + value: "true" + restartPolicy: Never + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 15 + triggers: + - type: azure-pipelines + metadata: + organizationURLFromEnv: "AZP_URL" + personalAccessTokenFromEnv: "AZP_TOKEN" + poolName: "{{.PoolName}}" +` +) + +func TestScaler(t *testing.T) { + // setup + t.Log("--- setting up ---") + require.NotEmpty(t, organizationURL, "AZURE_DEVOPS_ORGANIZATION_URL env variable is required for azure pipelines test") + require.NotEmpty(t, personalAccessToken, "AZURE_DEVOPS_PAT env variable is required for azure pipelines test") + require.NotEmpty(t, project, "AZURE_DEVOPS_PROJECT env variable is required for azure pipelines test") + require.NotEmpty(t, demandParentBuildID, "AZURE_DEVOPS_DEMAND_PARENT_BUILD_DEFINITION_ID env variable is 
required for azure pipelines test") + require.NotEmpty(t, poolName, "AZURE_DEVOPS_DEMAND_POOL_NAME env variable is required for azure pipelines test") + connection := azuredevops.NewPatConnection(organizationURL, personalAccessToken) + clearAllBuilds(t, connection) + // Get pool ID + poolID = fmt.Sprintf("%d", getAzDoPoolID(t, connection)) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // seed never runner jobs and setup Azure DevOps + err := preSeedAgentPool(t, data) + require.NoError(t, err) + + WaitForPodCountInNamespace(t, kc, testNamespace, minReplicaCount, 60, 2) + // new demand tests (assumes pre-seeded template) + + KubectlApplyWithTemplate(t, data, "demandScaledJobTemplate", demandScaledJobTemplate) + testJobScaleOut(t, kc, connection) + testJobScaleIn(t, kc) + KubectlDeleteWithTemplate(t, data, "demandScaledJobTemplate", demandScaledJobTemplate) + + KubectlApplyWithTemplate(t, data, "parentScaledJobTemplate", parentScaledJobTemplate) + testJobScaleOut(t, kc, connection) + testJobScaleIn(t, kc) + KubectlDeleteWithTemplate(t, data, "parentScaledJobTemplate", parentScaledJobTemplate) + + KubectlApplyWithTemplate(t, data, "anyScaledJobTemplate", anyScaledJobTemplate) + testJobScaleOut(t, kc, connection) + testJobScaleIn(t, kc) + KubectlDeleteWithTemplate(t, data, "anyScaledJobTemplate", anyScaledJobTemplate) + + KubectlApplyWithTemplate(t, data, "demandRequireAllScaledJobTemplate", demandRequireAllScaledJobTemplate) + testJobScaleOut(t, kc, connection) + testJobScaleIn(t, kc) + KubectlDeleteWithTemplate(t, data, "demandRequireAllScaledJobTemplate", demandRequireAllScaledJobTemplate) + + DeleteKubernetesResources(t, kc, testNamespace, data, templates) + CleanUpAdo(t, data) +} + +func getAzDoPoolID(t *testing.T, connection *azuredevops.Connection) int { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + 
 defer cancel() + taskClient, err := taskagent.NewClient(ctx, connection) + if err != nil { + t.Errorf("unable to create task agent client") + } + args := taskagent.GetAgentPoolsArgs{ + PoolName: &poolName, + } + pools, err := taskClient.GetAgentPools(ctx, args) + if err != nil { + t.Errorf("unable to get the pools") + } + return *(*pools)[0].Id +} + +func queueBuild(t *testing.T, connection *azuredevops.Connection, bid int) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + buildClient, err := build.NewClient(ctx, connection) + if err != nil { + t.Errorf("unable to create build client") + } + args := build.QueueBuildArgs{ + Project: &project, + Build: &build.Build{ + Definition: &build.DefinitionReference{ + Id: &bid, + }, + }, + } + _, err = buildClient.QueueBuild(ctx, args) + if err != nil { + t.Errorf("unable to queue build") + } +} + +func clearAllBuilds(t *testing.T, connection *azuredevops.Connection) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + buildClient, err := build.NewClient(ctx, connection) + if err != nil { + t.Errorf("unable to create build client") + } + var top = 20 + args := build.GetBuildsArgs{ + Project: &project, + StatusFilter: &build.BuildStatusValues.All, + QueryOrder: &build.BuildQueryOrderValues.QueueTimeDescending, + Top: &top, + } + azBuilds, err := buildClient.GetBuilds(ctx, args) + if err != nil { + t.Errorf("unable to get builds") + } + for _, azBuild := range azBuilds.Value { + azBuild.Status = &build.BuildStatusValues.Cancelling + args := build.UpdateBuildArgs{ + Build: &azBuild, + Project: &project, + BuildId: azBuild.Id, + } + _, err = buildClient.UpdateBuild(ctx, args) + if err != nil { + t.Errorf("unable to cancel build") + } + } +} + +func getTemplateData() (templateData, []Template) { + base64Pat := base64.StdEncoding.EncodeToString([]byte(personalAccessToken)) + + return templateData{ + TestNamespace: testNamespace, + SecretName: 
secretName, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + ScaledJobName: scaledJobName, + MinReplicaCount: fmt.Sprintf("%v", minReplicaCount), + MaxReplicaCount: fmt.Sprintf("%v", maxReplicaCount), + Pat: base64Pat, + URL: organizationURL, + PoolName: poolName, + PoolID: poolID, + SeedType: "golang", // must match the pipeline's demand + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + } +} + +func testJobScaleOut(t *testing.T, kc *kubernetes.Clientset, connection *azuredevops.Connection) { + t.Log("--- testing scale out ---") + id, err := strconv.Atoi(demandParentBuildID) + if err != nil { + t.Errorf("unable to parse buildID") + } + queueBuild(t, connection, id) + + assert.True(t, WaitForJobCount(t, kc, testNamespace, 1, 180, 1), "replica count should be 1 after 3 minutes") +} + +func testJobScaleIn(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale in ---") + + assert.True(t, WaitForAllJobsSuccess(t, kc, testNamespace, 60, 5), "jobs should be completed after 1 minute") + DeletePodsInNamespaceBySelector(t, kc, "app="+scaledJobName, testNamespace) +} + +// preSeed Agent Pool to stop AzDO auto failing unfulfillable jobs +func preSeedAgentPool(t *testing.T, data templateData) error { + naData := data + naData.SeedType = "never" + naData.ScaledJobName = "never-agent-demand-scaledjob" + KubectlApplyWithTemplate(t, naData, "demandScaledJobTemplate", demandScaledJobTemplate) + + naData.ScaledJobName = "never-agent-parent-scaledjob" + KubectlApplyWithTemplate(t, naData, "parentScaledJobTemplate", parentScaledJobTemplate) + + err := KubectlApplyWithErrors(t, naData, "deploymentTemplateSeed", seedDeploymentTemplate) + if err != nil { + return err + } + + err = KubectlApplyWithErrors(t, data, "deploymentTemplateSeed", seedDeploymentTemplate) + if err != nil { + return err + } + // wait for deployment to be ready in AzDO + for !checkAgentState(t, data, "online") { + time.Sleep(10 * time.Second) + } + for 
 !checkAgentState(t, naData, "online") { + time.Sleep(10 * time.Second) + } + // delete the deployment + KubectlDeleteWithTemplate(t, naData, "deploymentTemplateSeed", seedDeploymentTemplate) + KubectlDeleteWithTemplate(t, data, "deploymentTemplateSeed", seedDeploymentTemplate) + for !checkAgentState(t, data, "offline") { + time.Sleep(10 * time.Second) + } + for !checkAgentState(t, naData, "offline") { + time.Sleep(10 * time.Second) + } + return nil +} + +// checkAgentState checks whether an enabled agent matching the seed type is in the given state +func checkAgentState(t *testing.T, data templateData, state string) bool { + // get the agent pool id + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + connection := azuredevops.NewPatConnection(data.URL, personalAccessToken) + taskClient, err := taskagent.NewClient(ctx, connection) + if err != nil { + t.Errorf("unable to create task agent client, %s", err) + } + + args := taskagent.GetAgentPoolsArgs{ + PoolName: &data.PoolName, + } + pools, err := taskClient.GetAgentPools(ctx, args) + if err != nil { + t.Errorf("unable to get the pools, %s", err) + return false + } + + poolID := *(*pools)[0].Id + + agents, err := taskClient.GetAgents(ctx, taskagent.GetAgentsArgs{PoolId: &poolID}) + if err != nil { + t.Errorf("unable to get the agent, %s", err) + return false + } + + tState := taskagent.TaskAgentStatus(state) + + for _, agent := range *agents { + if *agent.Enabled && *agent.Status == tState && strings.HasPrefix(*agent.Name, data.SeedType+"-template") { + return true + } + } + + t.Logf("not got %s, %s agent yet", data.SeedType+"-template", state) + + return false +} + +func removeAgentFromAdo(t *testing.T, data templateData) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + connection := azuredevops.NewPatConnection(data.URL, personalAccessToken) + taskClient, err := taskagent.NewClient(ctx, connection) + if err != nil { + t.Errorf("unable to create task agent client, %s", 
err) + } + + args := taskagent.GetAgentPoolsArgs{ + PoolName: &data.PoolName, + } + pools, err := taskClient.GetAgentPools(ctx, args) + if err != nil { + t.Errorf("unable to get the pools, %s", err) + } + + poolID := *(*pools)[0].Id + + agents, err := taskClient.GetAgents(ctx, taskagent.GetAgentsArgs{PoolId: &poolID}) + if err != nil { + t.Errorf("unable to get the agent, %s", err) + } + + for _, agent := range *agents { + if *agent.Enabled && strings.HasPrefix(*agent.Name, data.SeedType+"-template") { + err := taskClient.DeleteAgent(ctx, taskagent.DeleteAgentArgs{PoolId: &poolID, AgentId: agent.Id}) + if err != nil { + t.Errorf("unable to delete the agent, %s", err) + } + } + } +} + +func CleanUpAdo(t *testing.T, data templateData) { + // cleanup + removeAgentFromAdo(t, data) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 12d6fcff23a..5b5e474138a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1193,7 +1193,6 @@ go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc # golang.org/x/crypto v0.6.0 => golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 ## explicit; go 1.17 -golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 @@ -2402,6 +2401,7 @@ sigs.k8s.io/yaml # github.com/chzyer/logex => github.com/chzyer/logex v1.2.1 # github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.1.0 # github.com/emicklei/go-restful => github.com/emicklei/go-restful v2.16.0+incompatible +# github.com/otiai10/mint => github.com/otiai10/mint v1.4.1 # golang.org/x/crypto => golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 # golang.org/x/net => golang.org/x/net v0.7.0 # golang.org/x/text => golang.org/x/text v0.7.0