diff --git a/.github/.copilot/breadcrumbs/2025-11-05-0000-k8s-version-collection-with-ttl.md b/.github/.copilot/breadcrumbs/2025-11-05-0000-k8s-version-collection-with-ttl.md new file mode 100644 index 000000000..d428d4122 --- /dev/null +++ b/.github/.copilot/breadcrumbs/2025-11-05-0000-k8s-version-collection-with-ttl.md @@ -0,0 +1,81 @@ +# Implementation: Kubernetes Version Collection with TTL Caching + +## Overview +Add a `collectK8sVersion` function to the Azure property provider that collects the Kubernetes server version through the discovery client, with a 15-minute TTL cache to minimize API calls. + +## Plan + +### Phase 1: Add Cache Fields +**Task 1.1: Add cache-related fields to PropertyProvider struct** +- Add `cachedK8sVersion` string field to store the cached version +- Add `k8sVersionCacheTime` time.Time field to track when the cache was last updated +- Add `k8sVersionCacheTTL` time.Duration field set to 15 minutes +- Add a mutex for thread-safe access to cached values + +### Phase 2: Implement collectK8sVersion Function +**Task 2.1: Implement the collectK8sVersion function** +- Check if a cached version exists and is still valid (within TTL) +- If the cache is valid, return the cached version +- If the cache is expired or empty, call discoveryClient.ServerVersion() +- Update the cache with the new version and the current timestamp +- Return the version as a property with an observation time (the sketch after the summary below distills this flow) + +### Phase 3: Integrate into Collect Method +**Task 3.1: Call collectK8sVersion in Collect method** +- Add a call to collectK8sVersion in the Collect method +- Store the k8s version in the properties map + +### Phase 4: Write Unit Tests +**Task 4.1: Create unit tests for collectK8sVersion** +- Test the cache hit scenario (cached version within TTL) +- Test the cache miss scenario (no cached version) +- Test the cache expiration scenario (cached version expired) +- Test error handling from the discovery client +- Test thread safety of cache access + +### Phase 5: Verify Tests Pass +**Task 5.1: Run unit tests** +- Execute `go test` for the provider package +- Verify all tests pass + +## Success Criteria +- [x] Cache fields added to PropertyProvider struct +- [x] collectK8sVersion function implemented with TTL logic +- [x] Function integrated into Collect method +- [x] Unit tests created and passing +- [x] Thread-safe implementation verified + +## Implementation Notes +- Using sync.Mutex for thread-safe cache access +- TTL set to 15 minutes as specified +- Uses the standard `propertyprovider.K8sVersionProperty` constant instead of creating a new one +- Changed the `discoveryClient` field type from `discovery.DiscoveryInterface` to `discovery.ServerVersionInterface` for better testability and to depend only on the interface we actually need +- Fixed a test nil-pointer issue by setting the discoveryClient field only when it is non-nil + +## Final Implementation Summary +All tasks completed successfully. The `collectK8sVersion` function now: +1. Caches the Kubernetes version with a 15-minute TTL +2. Uses thread-safe mutex locks for concurrent access +3. Properly handles nil discovery client cases +4. Returns early if the cache is still valid, to minimize API calls +5. Updates the cache atomically when fetching a new version +6. Has comprehensive unit tests covering all scenarios, including cache hits, misses, expiration, errors, and concurrency
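The Phase 2 flow above (check the cache, return early on a hit, refresh on a miss) is the heart of the change. The self-contained sketch below distills it; the `versionCache` type and its `Get` method are illustrative stand-ins, since in the actual patch the fields live on `PropertyProvider` and the logic in `collectK8sVersion`. Only `discovery.ServerVersionInterface` and its `ServerVersion()` call are taken from the code.

```go
package versioncache

import (
	"fmt"
	"sync"
	"time"

	"k8s.io/client-go/discovery"
)

// versionCache memoizes the result of ServerVersion() for a fixed TTL,
// mirroring the cache fields the patch adds to PropertyProvider.
type versionCache struct {
	mu       sync.Mutex
	client   discovery.ServerVersionInterface
	version  string
	observed time.Time
	ttl      time.Duration
}

// Get returns the cached version while it is fresh; otherwise it makes a
// single discovery call and refreshes the cache.
func (c *versionCache) Get() (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.version != "" && time.Since(c.observed) < c.ttl {
		return c.version, nil // cache hit: no API call
	}
	info, err := c.client.ServerVersion() // cache miss or expired
	if err != nil {
		return "", fmt.Errorf("refreshing server version: %w", err)
	}
	c.version, c.observed = info.GitVersion, time.Now()
	return c.version, nil
}
```

Note that, as in the patch, a failed refresh does not clear a previously cached value; the caller just sees the error for that attempt.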
+ +## Integration Test Updates +Updated integration tests to ignore the new k8s version property in comparisons: +- Added `ignoreNonDeterministicProperty` using `cmpopts.IgnoreMapEntries` to filter the k8s version and certificate authority properties out of test expectations +- Integration tests now pass successfully (all 33 specs passed) +- The k8s version is being collected correctly from the test Kubernetes environment, validating that the implementation works end-to-end + +## Test Results +- Unit tests: ✅ 8/8 passed (7 in TestCollectK8sVersion + 1 in TestCollectK8sVersionConcurrency) +- Integration tests: ✅ 33/33 specs passed +- All scenarios validated, including cache behavior, TTL expiration, error handling, and thread safety + +## Refactoring +Simplified the implementation by removing the `k8sVersionCacheTTL` instance field from PropertyProvider: +- Removed the `k8sVersionCacheTTL time.Duration` field from the struct +- Updated `collectK8sVersion` to use the package-level `k8sVersionCacheTTL` value directly +- Removed field initialization from `New()` and `NewWithPricingProvider()` constructors +- Updated unit tests to remove the field from test PropertyProvider instances +- All tests still pass after refactoring ✅ diff --git a/.gitignore b/.gitignore index b8e3541cc..828916b76 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,4 @@ ut-coverage.xml *~ .vscode/ +.qoder/ diff --git a/CLAUDE.md b/CLAUDE.md index 05ff228b0..d128e5019 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -34,8 +34,12 @@ make e2e-tests # Run custom E2E tests with labels make e2e-tests-custom + +# Clean up E2E environment +make clean-e2e-tests ``` + ### Code Quality ```bash # Run linting (required before commits) diff --git a/config/crd/bases/multicluster.x-k8s.io_clusterprofiles.yaml b/config/crd/bases/multicluster.x-k8s.io_clusterprofiles.yaml index f8883b17d..078dac97b 100644 --- a/config/crd/bases/multicluster.x-k8s.io_clusterprofiles.yaml +++ b/config/crd/bases/multicluster.x-k8s.io_clusterprofiles.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.17.3 name: clusterprofiles.multicluster.x-k8s.io spec: group: multicluster.x-k8s.io @@ -62,20 +62,98 @@ spec: status: description: ClusterProfileStatus defines the observed state of ClusterProfile. properties: + accessProviders: + description: |- + AccessProviders is a list of cluster access providers that can provide access + information for clusters. + items: + description: |- + AccessProvider defines how to access the cluster. + It contains the name of the provider name and the cluster connection details. + The name is used to identify different access info types, such as "kubeconfig" or "oidc". + The Cluster field contains the actual cluster connection details, such as server address, + certificate authority data, and authentication information. + properties: + cluster: + description: Cluster contains information about how to communicate + with a kubernetes cluster + properties: + certificate-authority: + description: CertificateAuthority is the path to a cert + file for the certificate authority. + type: string + certificate-authority-data: + description: CertificateAuthorityData contains PEM-encoded + certificate authority certificates. 
Overrides CertificateAuthority + format: byte + type: string + disable-compression: + description: |- + DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + type: boolean + extensions: + description: Extensions holds additional information. This + is useful for extenders so that reads and writes don't + clobber unknown fields + items: + description: NamedExtension relates nicknames to extension + information + properties: + extension: + description: Extension holds the extension information + type: object + x-kubernetes-preserve-unknown-fields: true + name: + description: Name is the nickname for this Extension + type: string + required: + - extension + - name + type: object + type: array + insecure-skip-tls-verify: + description: InsecureSkipTLSVerify skips the validity check + for the server's certificate. This will make your HTTPS + connections insecure. + type: boolean + proxy-url: + description: |- + ProxyURL is the URL to the proxy to be used for all requests made by this + client. URLs with "http", "https", and "socks5" schemes are supported. If + this configuration is not provided or the empty string, the client + attempts to construct a proxy configuration from http_proxy and + https_proxy environment variables. If these environment variables are not + set, the client does not attempt to proxy requests. + + socks5 proxying does not currently support spdy streaming endpoints (exec, + attach, port forward). + type: string + server: + description: Server is the address of the kubernetes cluster + (https://hostname:port). + type: string + tls-server-name: + description: TLSServerName is used to check server certificate. + If TLSServerName is empty, the hostname used to contact + the server is used. + type: string + required: + - server + type: object + name: + type: string + required: + - name + type: object + type: array conditions: description: Conditions contains the different condition statuses for this cluster. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -116,12 +194,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. 
- The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -133,24 +206,115 @@ spec: - type type: object type: array + credentialProviders: + description: |- + CredentialProviders is a list of cluster access providers that can provide access + information for clusters. + Deprecated: Use AccessProviders instead. If both AccessProviders and CredentialProviders are provided, both are used. In case they specify a provider with the same name, the one in AccessProviders is preferred. + items: + description: |- + AccessProvider defines how to access the cluster. + It contains the name of the provider name and the cluster connection details. + The name is used to identify different access info types, such as "kubeconfig" or "oidc". + The Cluster field contains the actual cluster connection details, such as server address, + certificate authority data, and authentication information. + properties: + cluster: + description: Cluster contains information about how to communicate + with a kubernetes cluster + properties: + certificate-authority: + description: CertificateAuthority is the path to a cert + file for the certificate authority. + type: string + certificate-authority-data: + description: CertificateAuthorityData contains PEM-encoded + certificate authority certificates. Overrides CertificateAuthority + format: byte + type: string + disable-compression: + description: |- + DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + type: boolean + extensions: + description: Extensions holds additional information. This + is useful for extenders so that reads and writes don't + clobber unknown fields + items: + description: NamedExtension relates nicknames to extension + information + properties: + extension: + description: Extension holds the extension information + type: object + x-kubernetes-preserve-unknown-fields: true + name: + description: Name is the nickname for this Extension + type: string + required: + - extension + - name + type: object + type: array + insecure-skip-tls-verify: + description: InsecureSkipTLSVerify skips the validity check + for the server's certificate. This will make your HTTPS + connections insecure. + type: boolean + proxy-url: + description: |- + ProxyURL is the URL to the proxy to be used for all requests made by this + client. URLs with "http", "https", and "socks5" schemes are supported. If + this configuration is not provided or the empty string, the client + attempts to construct a proxy configuration from http_proxy and + https_proxy environment variables. If these environment variables are not + set, the client does not attempt to proxy requests. + + socks5 proxying does not currently support spdy streaming endpoints (exec, + attach, port forward). + type: string + server: + description: Server is the address of the kubernetes cluster + (https://hostname:port). + type: string + tls-server-name: + description: TLSServerName is used to check server certificate. + If TLSServerName is empty, the hostname used to contact + the server is used. 
+ type: string + required: + - server + type: object + name: + type: string + required: + - name + type: object + type: array properties: description: |- - Properties defines name/value pairs to represent properties of a cluster. - It could be a collection of ClusterProperty (KEP-2149) resources, - but could also be info based on other implementations. - The names of the properties can be predefined names from ClusterProperty resources - and is allowed to be customized by different cluster managers. + Properties defines cluster characteristics through a list of Property objects. + Each Property can be one of: + 1. A ClusterProperty resource (as defined in KEP-2149) + 2. Custom information from cluster manager implementations + Property names support both: + - Standard names from ClusterProperty resources + - Custom names defined by cluster managers items: description: |- - Property defines a name/value pair to represent a property of a cluster. - It could be a ClusterProperty (KEP-2149) resource, - but could also be info based on other implementations. - The name of the property can be predefined name from a ClusterProperty resource - and is allowed to be customized by different cluster managers. + Property defines the data structure to represent a property of a cluster. + It contains a name/value pair and the last observed time of the property on the cluster. This property can store various configurable details and metrics of a cluster, - which may include information such as the number of nodes, total and free CPU, - and total and free memory, among other potential attributes. + which may include information such as the entry point of the cluster, types of nodes, location, etc. according to KEP 4322. properties: + lastObservedTime: + description: |- + LastObservedTime is the last time the property was observed on the corresponding cluster. + The value is the timestamp when the property was observed not the time when the property was updated in the cluster-profile. + format: date-time + type: string name: description: |- Name is the name of a property resource on cluster. 
It's a well-known diff --git a/go.mod b/go.mod index 8be9f715d..d5eb73fbe 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 github.com/Azure/karpenter-provider-azure v1.5.1 - github.com/crossplane/crossplane-runtime v1.17.0 + github.com/crossplane/crossplane-runtime v1.20.0 github.com/evanphx/json-patch/v5 v5.9.11 github.com/google/go-cmp v0.7.0 github.com/onsi/ginkgo/v2 v2.23.4 @@ -24,20 +24,20 @@ require ( golang.org/x/sync v0.18.0 golang.org/x/time v0.11.0 gomodules.xyz/jsonpatch/v2 v2.4.0 - k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 - k8s.io/component-base v0.32.3 + k8s.io/api v0.34.1 + k8s.io/apiextensions-apiserver v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/client-go v0.34.1 + k8s.io/component-base v0.34.1 k8s.io/component-helpers v0.32.3 k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.3 k8s.io/metrics v0.32.3 - k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/cloud-provider-azure v1.32.4 sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.5.20 - sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/cluster-inventory-api v0.0.0-20251028164203-2e3fabb46733 + sigs.k8s.io/controller-runtime v0.21.0 ) require ( @@ -62,22 +62,20 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect @@ -88,7 +86,7 @@ require ( github.com/mailru/easyjson v0.9.0 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect @@ -109,10 +107,12 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/mock v0.5.1 // indirect go.uber.org/multierr v1.11.0 // indirect + 
go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.45.0 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/term v0.37.0 // indirect golang.org/x/text v0.31.0 // indirect @@ -122,13 +122,14 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cli-runtime v0.32.3 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/karpenter v1.5.0 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) replace ( diff --git a/go.sum b/go.sum index 99c67babe..5bb3b922e 100644 --- a/go.sum +++ b/go.sum @@ -97,16 +97,16 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/crossplane/crossplane-runtime v1.17.0 h1:y+GvxPT1M9s8BKt2AeZJdd2d6pg2xZeCO6LiR+VxEF8= -github.com/crossplane/crossplane-runtime v1.17.0/go.mod h1:vtglCrnnbq2HurAk9yLHa4qS0bbnCxaKL7C21cQcB/0= +github.com/crossplane/crossplane-runtime v1.20.0 h1:I54uipRIecqZyms+vz1J/l62yjVQ7HV5w+Nh3RMrUtc= +github.com/crossplane/crossplane-runtime v1.20.0/go.mod h1:lfV1VJenDc9PNVLxDC80YjPoTm+JdSZ13xlS2h37Dvg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= @@ -115,8 +115,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -133,8 +133,8 @@ github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC0 github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= @@ -163,13 +163,10 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -212,8 +209,9 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -295,8 +293,8 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk= @@ -321,6 +319,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -336,8 +338,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 
v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -369,8 +371,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= -google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -387,53 +389,55 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss= k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= k8s.io/cloud-provider v0.32.3 h1:WC7KhWrqXsU4b0E4tjS+nBectGiJbr1wuc1TpWXvtZM= k8s.io/cloud-provider v0.32.3/go.mod h1:/fwBfgRPuh16n8vLHT+PPT+Bc4LAEaJYj38opO2wsYY= -k8s.io/component-base v0.32.3 
h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= -k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= +k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= k8s.io/component-helpers v0.32.3 h1:9veHpOGTPLluqU4hAu5IPOwkOIZiGAJUhHndfVc5FT4= k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao= k8s.io/csi-translation-lib v0.32.3 h1:fKdc9LMVEMk18xsgoPm1Ga8GjfhI7AM3UX8gnIeXZKs= k8s.io/csi-translation-lib v0.32.3/go.mod h1:VX6+hCKgQyFnUX3VrnXZAgYYBXkrqx4BZk9vxr9qRcE= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= k8s.io/metrics v0.32.3 h1:2vsBvw0v8rIIlczZ/lZ8Kcqk9tR6Fks9h+dtFNbc2a4= k8s.io/metrics v0.32.3/go.mod h1:9R1Wk5cb+qJpCQon9h52mgkVCcFeYxcY+YkumfwHVCU= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/cloud-provider-azure v1.32.4 h1:v50uJzcE04w25Ra9EfWX/GHTTJKUC0+0Xpt+TOJ+D14= sigs.k8s.io/cloud-provider-azure v1.32.4/go.mod h1:FbBaQt7N6/UVtK/VmIuJMLGe0gKUJ6NwoGrvH+zEa9w= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.5.20 h1:aVSc4LFdBVlrhlldIzPo4NrcTQRdnAlqTB31sOcPIrM= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.5.20/go.mod h1:OkkCYstvomfIwV4rvVIegymcgMnt7ZQ3+1Wi9WZmP1s= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.5.2 h1:jjFJF0PmS9IHLokD41mM6RVoqQF3BQtVDmQd6ZMnN6E= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.5.2/go.mod h1:7DdZ9ipIsmPLpBlfT4gueejcUlJBZQKWhdljQE5SKvc= -sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848 h1:WYPi2PdQyZwZkHG648v2jQl6deyCgyjJ0fkLYgUJ618= -sigs.k8s.io/cluster-inventory-api v0.0.0-20240730014211-ef0154379848/go.mod h1:/aN4e7RWOMHgT4xAjCNkV4YFcpKfpZCeumMIL7S+KNM= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/cluster-inventory-api v0.0.0-20251028164203-2e3fabb46733 h1:l90ANqblqFrE4L2QLLk+9iPjfmaLRvOFL51l/fgwUgg= +sigs.k8s.io/cluster-inventory-api v0.0.0-20251028164203-2e3fabb46733/go.mod h1:guwenlZ9iIfYlNxn7ExCfugOLTh6wjjRX3adC36YCmQ= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime 
v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/karpenter v1.5.0 h1:3HaFtFvkteUJ+SjIViR1ImR0qR+GTqDulahauIuE4Qg= sigs.k8s.io/karpenter v1.5.0/go.mod h1:YuqGoQsLti+V7ugHQVGXuT4v1QwCMiKloHLcPDfwMbY= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/pkg/controllers/clusterinventory/clusterprofile/controller.go b/pkg/controllers/clusterinventory/clusterprofile/controller.go index e3cb7a320..b1de43f80 100644 --- a/pkg/controllers/clusterinventory/clusterprofile/controller.go +++ b/pkg/controllers/clusterinventory/clusterprofile/controller.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) @@ -158,7 +159,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, err } klog.V(2).InfoS("Cluster profile object is created or updated", "memberCluster", mcRef, "clusterProfile", klog.KObj(cp), "operation", createOrUpdateRes) - // sync the cluster profile condition from the member cluster condition + + // sync the cluster profile status/condition from the member cluster condition + r.fillInClusterStatus(mc, cp) r.syncClusterProfileCondition(mc, cp) if err = r.Status().Update(ctx, cp); err != nil { klog.ErrorS(err, "Failed to update cluster profile status", "memberCluster", mcRef, "clusterProfile", klog.KObj(cp)) @@ -167,10 +170,43 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, nil } +// fillInClusterStatus fills in the ClusterProfile status fields from the MemberCluster status. +// Currently, it only fills in the Kubernetes version field. 
+func (r *Reconciler) fillInClusterStatus(mc *clusterv1beta1.MemberCluster, cp *clusterinventory.ClusterProfile) { + k8sversion, exists := mc.Status.Properties[propertyprovider.K8sVersionProperty] + if exists { + klog.V(3).InfoS("Get Kubernetes version from member cluster status", "kubernetesVersion", k8sversion.Value, "clusterProfile", klog.KObj(cp)) + cp.Status.Version = clusterinventory.ClusterVersion{ + Kubernetes: k8sversion.Value, + } + } + // Add the cluster access provider; we only have one so far + cp.Status.AccessProviders = []clusterinventory.AccessProvider{ + { + Name: controller.ClusterManagerName, + }, + } + // TODO: throw an unexpected error if clusterEntryPoint is not found + // We don't have a way to get it yet + clusterEntry, exists := mc.Status.Properties[propertyprovider.ClusterEntryPointProperty] + if exists { + klog.V(3).InfoS("Get Kubernetes cluster entry point from member cluster status", "clusterEntryPoint", clusterEntry.Value, "clusterProfile", klog.KObj(cp)) + cp.Status.AccessProviders[0].Cluster.Server = clusterEntry.Value + } + // Get the CA data + certificateAuthorityData, exists := mc.Status.Properties[propertyprovider.ClusterCertificateAuthorityProperty] + if exists { + klog.V(3).InfoS("Get Kubernetes cluster certificate authority data from member cluster status", "clusterProfile", klog.KObj(cp)) + cp.Status.AccessProviders[0].Cluster.CertificateAuthorityData = []byte(certificateAuthorityData.Value) + } else { + // raise an alert + _ = controller.NewUnexpectedBehaviorError(fmt.Errorf("cluster certificate authority data not found in member cluster %s status", mc.Name)) + cp.Status.AccessProviders[0].Cluster.InsecureSkipTLSVerify = true + } +} + // syncClusterProfileCondition syncs the ClusterProfile object's condition based on the MemberCluster object's condition. func (r *Reconciler) syncClusterProfileCondition(mc *clusterv1beta1.MemberCluster, cp *clusterinventory.ClusterProfile) { - // Update the cluster profile status. - // // For simplicity reasons, for now only the health check condition is populated, using // Fleet member agent's API server health check result. var mcHealthCond *metav1.Condition diff --git a/pkg/controllers/placement/controller.go b/pkg/controllers/placement/controller.go index fcdead2dc..0e7351994 100644 --- a/pkg/controllers/placement/controller.go +++ b/pkg/controllers/placement/controller.go @@ -241,7 +241,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 } // We don't requeue the request here immediately so that placement can keep tracking the rollout status. - if createResourceSnapshotRes.Requeue { + if createResourceSnapshotRes.RequeueAfter > 0 { latestResourceSnapshotKObj := klog.KObj(latestResourceSnapshot) // We cannot create the resource snapshot immediately because of the resource snapshot creation interval. // Rebuild the seletedResourceIDs using the latestResourceSnapshot. 
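Every placement hunk in this file makes the same mechanical change: retry intent is now carried solely by `RequeueAfter`, and consumers test `RequeueAfter > 0` instead of the bare `Requeue` flag, which controller-runtime has been steering away from in favor of explicit delays. A minimal illustration of the convention, with helper names that are ours rather than the patch's:

```go
package placement

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// resultAfter signals "try again later" purely through RequeueAfter; the
// patch drops the redundant Requeue flag from results built like this.
func resultAfter(wait time.Duration) ctrl.Result {
	return ctrl.Result{RequeueAfter: wait} // was: {Requeue: true, RequeueAfter: wait}
}

// shouldRequeue is how the updated call sites test a result for retry intent.
func shouldRequeue(res ctrl.Result) bool {
	return res.RequeueAfter > 0
}
```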
@@ -297,7 +297,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 klog.V(2).InfoS("Placement has finished the rollout process and reached the desired status", "placement", placementKObj, "generation", placementObj.GetGeneration()) r.Recorder.Event(placementObj, corev1.EventTypeNormal, "PlacementRolloutCompleted", "Placement has finished the rollout process and reached the desired status") } - if createResourceSnapshotRes.Requeue { + if createResourceSnapshotRes.RequeueAfter > 0 { klog.V(2).InfoS("Requeue the request to handle the new resource snapshot", "placement", placementKObj, "generation", placementObj.GetGeneration()) // We requeue the request to handle the resource snapshot. return createResourceSnapshotRes, nil @@ -319,7 +319,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 // Here we requeue the request to prevent a bug in the watcher. klog.V(2).InfoS("Scheduler has not scheduled any cluster yet and requeue the request as a backup", "placement", placementKObj, "scheduledCondition", placementObj.GetCondition(string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType)), "generation", placementObj.GetGeneration()) - if createResourceSnapshotRes.Requeue { + if createResourceSnapshotRes.RequeueAfter > 0 { klog.V(2).InfoS("Requeue the request to handle the new resource snapshot", "placement", placementKObj, "generation", placementObj.GetGeneration()) // We requeue the request to handle the resource snapshot. return createResourceSnapshotRes, nil @@ -327,7 +327,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 return ctrl.Result{RequeueAfter: controllerResyncPeriod}, nil } klog.V(2).InfoS("Placement rollout has not finished yet and requeue the request", "placement", placementKObj, "status", placementObj.GetPlacementStatus(), "generation", placementObj.GetGeneration()) - if createResourceSnapshotRes.Requeue { + if createResourceSnapshotRes.RequeueAfter > 0 { klog.V(2).InfoS("Requeue the request to handle the new resource snapshot", "placement", placementKObj, "generation", placementObj.GetGeneration()) // We requeue the request to handle the resource snapshot. return createResourceSnapshotRes, nil @@ -553,7 +553,7 @@ func (r *Reconciler) getOrCreateResourceSnapshot(ctx context.Context, placement if error != nil { return ctrl.Result{}, nil, error } - if res.Requeue { + if res.RequeueAfter > 0 { // If the latest resource snapshot is not ready to be updated, we requeue the request. 
return res, latestResourceSnapshot, nil } @@ -636,7 +636,7 @@ func (r *Reconciler) shouldCreateNewResourceSnapshotNow(ctx context.Context, lat "resourceSnapshot", snapshotKObj, "nextCreationTime", nextCreationTime, "latestResourceSnapshotCreationTime", latestResourceSnapshot.GetCreationTimestamp(), "resourceSnapshotCreationMinimumInterval", r.ResourceSnapshotCreationMinimumInterval, "resourceChangesCollectionDuration", r.ResourceChangesCollectionDuration, "afterDuration", nextCreationTime.Sub(now)) - return ctrl.Result{Requeue: true, RequeueAfter: nextCreationTime.Sub(now)}, nil + return ctrl.Result{RequeueAfter: nextCreationTime.Sub(now)}, nil } return ctrl.Result{}, nil } diff --git a/pkg/controllers/placement/controller_test.go b/pkg/controllers/placement/controller_test.go index 3bbfef28d..3aa9e0c02 100644 --- a/pkg/controllers/placement/controller_test.go +++ b/pkg/controllers/placement/controller_test.go @@ -2714,8 +2714,8 @@ func TestGetOrCreateClusterResourceSnapshot(t *testing.T) { if err != nil { t.Fatalf("failed to handle getOrCreateResourceSnapshot: %v", err) } - if res.Requeue != tc.wantRequeue { - t.Fatalf("getOrCreateResourceSnapshot() got Requeue %v, want %v", res.Requeue, tc.wantRequeue) + if (res.RequeueAfter > 0) != tc.wantRequeue { + t.Fatalf("getOrCreateResourceSnapshot() got Requeue %v, want %v", (res.RequeueAfter > 0), tc.wantRequeue) } options := []cmp.Option{ @@ -3106,7 +3106,7 @@ func TestGetOrCreateClusterResourceSnapshot_failure(t *testing.T) { if err == nil { // if error is nil t.Fatal("getOrCreateClusterResourceSnapshot() = nil, want err") } - if res.Requeue { + if res.RequeueAfter > 0 { t.Fatal("getOrCreateClusterResourceSnapshot() requeue = true, want false") } if !errors.Is(err, controller.ErrUnexpectedBehavior) { @@ -4476,7 +4476,7 @@ func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { collectionDuration: 30 * time.Second, annotationValue: now.Add(-10 * time.Second).Format(time.RFC3339), wantAnnoation: true, - wantRequeue: ctrl.Result{Requeue: true, RequeueAfter: 20 * time.Second}, + wantRequeue: ctrl.Result{RequeueAfter: 20 * time.Second}, }, { name: "ResourceChangesCollectionDuration is 0", @@ -4486,7 +4486,7 @@ func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { // no annotation → sets it and requeues annotationValue: "", wantAnnoation: true, - wantRequeue: ctrl.Result{Requeue: true, RequeueAfter: 295 * time.Second}, + wantRequeue: ctrl.Result{RequeueAfter: 295 * time.Second}, }, { name: "next detection time (now) + collection duration < latest resource snapshot creation time + creation interval", @@ -4496,7 +4496,7 @@ func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { // no annotation → sets it and requeues annotationValue: "", wantAnnoation: true, - wantRequeue: ctrl.Result{Requeue: true, RequeueAfter: 295 * time.Second}, + wantRequeue: ctrl.Result{RequeueAfter: 295 * time.Second}, }, { name: "next detection time (annotation) + collection duration < latest resource snapshot creation time + creation interval", @@ -4505,7 +4505,7 @@ func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { creationTime: now.Add(-10 * time.Second), annotationValue: now.Add(-5 * time.Second).Format(time.RFC3339), wantAnnoation: true, - wantRequeue: ctrl.Result{Requeue: true, RequeueAfter: 290 * time.Second}, + wantRequeue: ctrl.Result{RequeueAfter: 290 * time.Second}, }, { name: "last resource snapshot created long time before", @@ -4513,7 +4513,7 @@ func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { collectionDuration: 30 * 
time.Second, creationTime: now.Add(-1 * time.Hour), wantAnnoation: true, - wantRequeue: ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, + wantRequeue: ctrl.Result{RequeueAfter: 30 * time.Second}, }, { name: "next detection time (now) + collection duration >= latest resource snapshot creation time + creation interval", @@ -4521,7 +4521,7 @@ func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { collectionDuration: 60 * time.Second, creationTime: now.Add(-40 * time.Second), wantAnnoation: true, - wantRequeue: ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second}, + wantRequeue: ctrl.Result{RequeueAfter: 60 * time.Second}, }, } diff --git a/pkg/controllers/workapplier/apply_test.go b/pkg/controllers/workapplier/apply_test.go index 602d68526..39da8c5d6 100644 --- a/pkg/controllers/workapplier/apply_test.go +++ b/pkg/controllers/workapplier/apply_test.go @@ -384,7 +384,7 @@ func TestSetFleetLastAppliedAnnotation(t *testing.T) { nsManifestObj1 := ns.DeepCopy() wantNSManifestObj1 := ns.DeepCopy() wantNSManifestObj1.SetAnnotations(map[string]string{ - fleetv1beta1.LastAppliedConfigAnnotation: string("{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"name\":\"ns-1\"},\"spec\":{},\"status\":{}}\n"), + fleetv1beta1.LastAppliedConfigAnnotation: string("{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"name\":\"ns-1\"},\"spec\":{},\"status\":{}}\n"), }) nsManifestObj2 := ns.DeepCopy() @@ -393,7 +393,7 @@ func TestSetFleetLastAppliedAnnotation(t *testing.T) { }) wantNSManifestObj2 := ns.DeepCopy() wantNSManifestObj2.SetAnnotations(map[string]string{ - fleetv1beta1.LastAppliedConfigAnnotation: string("{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"name\":\"ns-1\"},\"spec\":{},\"status\":{}}\n"), + fleetv1beta1.LastAppliedConfigAnnotation: string("{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"name\":\"ns-1\"},\"spec\":{},\"status\":{}}\n"), }) // Annotation size limit is 262144 bytes. diff --git a/pkg/controllers/workapplier/utils.go b/pkg/controllers/workapplier/utils.go index 86f3b2b5b..1daec02d4 100644 --- a/pkg/controllers/workapplier/utils.go +++ b/pkg/controllers/workapplier/utils.go @@ -140,7 +140,6 @@ func discardFieldsIrrelevantInComparisonFrom(obj *unstructured.Unstructured) *un // Fields below are read-only fields in object meta. Fleet will ignore them in the comparison // process. - objCopy.SetCreationTimestamp(metav1.Time{}) // Deleted objects are handled separately in the apply process; for comparison purposes, // Fleet will ignore the deletion timestamp and grace period seconds. objCopy.SetDeletionTimestamp(nil) @@ -153,5 +152,13 @@ func discardFieldsIrrelevantInComparisonFrom(obj *unstructured.Unstructured) *un // Remove the status field. unstructured.RemoveNestedField(objCopy.Object, "status") + // Remove all creationTimestamp fields. + unstructured.RemoveNestedField(objCopy.Object, "metadata", "creationTimestamp") + // Resources that have .spec.template.metadata include: Deployment, Job, StatefulSet, + // DaemonSet, ReplicaSet, and CronJob. 
+ unstructured.RemoveNestedField(objCopy.Object, "spec", "template", "metadata", "creationTimestamp") + // Also handle CronJob's .spec.jobTemplate.spec.template.metadata + unstructured.RemoveNestedField(objCopy.Object, "spec", "jobTemplate", "spec", "template", "metadata", "creationTimestamp") + return objCopy } diff --git a/pkg/propertyprovider/azure/provider.go b/pkg/propertyprovider/azure/provider.go index 0351fa242..a02fcb469 100644 --- a/pkg/propertyprovider/azure/provider.go +++ b/pkg/propertyprovider/azure/provider.go @@ -20,6 +20,9 @@ package azure import ( "context" "fmt" + "os" + "sync" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -27,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" @@ -58,6 +62,11 @@ const ( CostPrecisionTemplate = "%.3f" ) +var ( + // k8sVersionCacheTTL is the TTL for the cached Kubernetes version. + k8sVersionCacheTTL = 15 * time.Minute +) + const ( // The condition related values in use by the Azure property provider. CostPropertiesCollectionSucceededCondType = "AKSClusterCostPropertiesCollectionSucceeded" @@ -75,6 +84,9 @@ type PropertyProvider struct { podTracker *trackers.PodTracker nodeTracker *trackers.NodeTracker + // The discovery client used to get the k8s cluster version. + discoveryClient discovery.ServerVersionInterface + // The region where the Azure property provider resides. // // This is necessary as the pricing client requires that a region to be specified; it can @@ -92,6 +104,15 @@ type PropertyProvider struct { // to avoid name conflicts, though at this moment are mostly reserved for testing purposes. nodeControllerName string podControllerName string + + // Cache for Kubernetes version information with TTL. + k8sVersionMutex sync.Mutex + cachedK8sVersion string + cachedK8sVersionObservedTime time.Time + + // Cached cluster certificate authority data (PEM bytes, as read from the CA file). + clusterCertificateAuthority []byte + clusterCertificateAuthorityObservedTime time.Time } // Verify that the Azure property provider implements the MetricProvider interface at compile time. @@ -180,12 +201,11 @@ func (p *PropertyProvider) Start(ctx context.Context, config *rest.Config) error // in a passive manner with no need for any centralized state. LeaderElection: false, }) - p.mgr = mgr - if err != nil { klog.ErrorS(err, "Failed to start Azure property provider") return err } + p.mgr = mgr switch { case p.nodeTracker != nil: @@ -220,6 +240,33 @@ func (p *PropertyProvider) Start(ctx context.Context, config *rest.Config) error p.nodeTracker = trackers.NewNodeTracker(nil) } + p.discoveryClient = discovery.NewDiscoveryClientForConfigOrDie(config) + // Fetch the k8s version from the discovery client. + klog.V(2).Info("Fetching Kubernetes version from discovery client") + serverVersion, err := p.discoveryClient.ServerVersion() + if err != nil { + klog.ErrorS(err, "Failed to get Kubernetes server version from discovery client") + return err + } + // Update the cache with the new version. + p.cachedK8sVersion = serverVersion.GitVersion + p.cachedK8sVersionObservedTime = time.Now() + + // Cache the cluster certificate authority data (PEM bytes, as read from the CA file). 
+ if len(config.CAFile) > 0 { + cadata, err := os.ReadFile(config.CAFile) + if err != nil { + klog.ErrorS(err, "Failed to read cluster certificate authority data from file") + return err + } + p.clusterCertificateAuthority = cadata + p.clusterCertificateAuthorityObservedTime = time.Now() + klog.V(2).Info("Cached cluster certificate authority data") + } else { + err := fmt.Errorf("rest.Config CAFile empty: %s", config.CAFile) + klog.ErrorS(err, "No certificate authority data available in rest.Config") + } + // Set up the node reconciler. klog.V(2).Info("Setting up the node reconciler") nodeReconciler := &controllers.NodeReconciler{ @@ -291,6 +338,9 @@ func (p *PropertyProvider) Collect(ctx context.Context) propertyprovider.Propert // Collect node-count related properties. p.collectNodeCountRelatedProperties(ctx, properties) + // Collect the Kubernetes version. + p.collectK8sVersion(ctx, properties) + // Collect the cost properties (if enabled). if p.isCostCollectionEnabled { costConds := p.collectCosts(ctx, properties) @@ -309,6 +359,14 @@ func (p *PropertyProvider) Collect(ctx context.Context) propertyprovider.Propert p.collectAvailableResource(ctx, &resources) } + // insert the cluster certificate authority property (if available) + if len(p.clusterCertificateAuthority) > 0 { + properties[propertyprovider.ClusterCertificateAuthorityProperty] = clusterv1beta1.PropertyValue{ + Value: string(p.clusterCertificateAuthority), + ObservationTime: metav1.NewTime(p.clusterCertificateAuthorityObservedTime), + } + } + // Return the collection response. return propertyprovider.PropertyCollectionResponse{ Properties: properties, @@ -423,6 +481,42 @@ func (p *PropertyProvider) collectAvailableResource(_ context.Context, usage *cl usage.Available = available } +// collectK8sVersion collects the Kubernetes server version information. +// It uses a cache with a 15-minute TTL to minimize API calls to the discovery client. +func (p *PropertyProvider) collectK8sVersion(_ context.Context, properties map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue) { + now := time.Now() + + // Check if we have a cached version that is still valid. + p.k8sVersionMutex.Lock() + defer p.k8sVersionMutex.Unlock() + if p.cachedK8sVersion != "" && now.Sub(p.cachedK8sVersionObservedTime) < k8sVersionCacheTTL { + // Cache is still valid, use the cached version. + properties[propertyprovider.K8sVersionProperty] = clusterv1beta1.PropertyValue{ + Value: p.cachedK8sVersion, + ObservationTime: metav1.NewTime(p.cachedK8sVersionObservedTime), + } + klog.V(2).InfoS("Using cached Kubernetes version", "version", p.cachedK8sVersion, "cacheAge", now.Sub(p.cachedK8sVersionObservedTime)) + return + } + + // Cache is expired or empty, fetch the version from the discovery client. + klog.V(2).Info("Fetching Kubernetes version from discovery client") + serverVersion, err := p.discoveryClient.ServerVersion() + if err != nil { + klog.ErrorS(err, "Failed to get Kubernetes server version from discovery client") + return + } + + // Update the cache with the new version. + p.cachedK8sVersion = serverVersion.GitVersion + p.cachedK8sVersionObservedTime = now + properties[propertyprovider.K8sVersionProperty] = clusterv1beta1.PropertyValue{ + Value: p.cachedK8sVersion, + ObservationTime: metav1.NewTime(now), + } + klog.V(2).InfoS("Collected Kubernetes version", "version", p.cachedK8sVersion) +} + // autoDiscoverRegionAndSetupTrackers auto-discovers the region of the AKS cluster. 
diff --git a/pkg/propertyprovider/azure/provider_integration_test.go b/pkg/propertyprovider/azure/provider_integration_test.go
index 0d41a3e05..5601b6eab 100644
--- a/pkg/propertyprovider/azure/provider_integration_test.go
+++ b/pkg/propertyprovider/azure/provider_integration_test.go
@@ -39,6 +39,9 @@ import (
 var (
 	ignoreObservationTimeFieldInPropertyValue = cmpopts.IgnoreFields(clusterv1beta1.PropertyValue{}, "ObservationTime")
+	ignoreNonDeterministicProperty            = cmpopts.IgnoreMapEntries(func(k clusterv1beta1.PropertyName, v clusterv1beta1.PropertyValue) bool {
+		return k == propertyprovider.K8sVersionProperty || k == propertyprovider.ClusterCertificateAuthorityProperty
+	})
 )
 
 var (
@@ -311,7 +314,7 @@ var (
 			}
 
 			res := p.Collect(ctx)
-			if diff := cmp.Diff(res, expectedRes, ignoreObservationTimeFieldInPropertyValue, cmpopts.EquateEmpty()); diff != "" {
+			if diff := cmp.Diff(res, expectedRes, ignoreObservationTimeFieldInPropertyValue, ignoreNonDeterministicProperty, cmpopts.EquateEmpty()); diff != "" {
 				return fmt.Errorf("property collection response (-got, +want):\n%s", diff)
 			}
 			return nil
diff --git a/pkg/propertyprovider/azure/provider_test.go b/pkg/propertyprovider/azure/provider_test.go
index 30438b615..ade75592b 100644
--- a/pkg/propertyprovider/azure/provider_test.go
+++ b/pkg/propertyprovider/azure/provider_test.go
@@ -27,6 +27,10 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/discovery/fake"
+	k8stesting "k8s.io/client-go/testing"
 
 	clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1"
 	"github.com/kubefleet-dev/kubefleet/pkg/propertyprovider"
@@ -742,12 +746,17 @@ func TestCollect(t *testing.T) {
 			for idx := range tc.pods {
 				podTracker.AddOrUpdate(&tc.pods[idx])
 			}
-
+			k8sVersion := "1.35.5"
 			p := &PropertyProvider{
 				nodeTracker:                           nodeTracker,
 				podTracker:                            podTracker,
 				isCostCollectionEnabled:               true,
 				isAvailableResourcesCollectionEnabled: true,
+				cachedK8sVersion:                      k8sVersion,
+				cachedK8sVersionObservedTime:          time.Now(),
+			}
+			tc.wantMetricCollectionResponse.Properties[propertyprovider.K8sVersionProperty] = clusterv1beta1.PropertyValue{
+				Value: k8sVersion,
 			}
 			res := p.Collect(ctx)
 			if diff := cmp.Diff(res, tc.wantMetricCollectionResponse, ignoreObservationTimeFieldInPropertyValue); diff != "" {
@@ -935,12 +944,21 @@ func TestCollectWithDisabledFeatures(t *testing.T) {
 				podTracker.AddOrUpdate(&pods[idx])
 			}
 		}
-
+		k8sVersion := "1.34.6"
 		p := &PropertyProvider{
 			nodeTracker:                           nodeTracker,
 			podTracker:                            podTracker,
 			isCostCollectionEnabled:               tc.isCostCollectionEnabled,
 			isAvailableResourcesCollectionEnabled: tc.isAvailableResourcesCollectionEnabled,
+			cachedK8sVersion:                      k8sVersion,
+			cachedK8sVersionObservedTime:          time.Now(),
+			clusterCertificateAuthority:           []byte("CADATA"),
+		}
+		tc.wantPropertyCollectionResponse.Properties[propertyprovider.K8sVersionProperty] = clusterv1beta1.PropertyValue{
+			Value: k8sVersion,
+		}
+		tc.wantPropertyCollectionResponse.Properties[propertyprovider.ClusterCertificateAuthorityProperty] = clusterv1beta1.PropertyValue{
+			Value: "CADATA",
+		}
 		res := p.Collect(ctx)
 		if diff := cmp.Diff(res, tc.wantPropertyCollectionResponse, ignoreObservationTimeFieldInPropertyValue); diff != "" {
@@ -949,3 +967,202 @@ func TestCollectWithDisabledFeatures(t *testing.T) {
 		})
 	}
 }
+
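+// TestCollectK8sVersion exercises the TTL cache in collectK8sVersion. It uses client-go's
+// fake discovery client so that each case can stub the reported server version and, via the
+// recorded actions, assert whether ServerVersion was actually called; a reactor on the
+// "version" resource is used to force discovery errors.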
+func TestCollectK8sVersion(t *testing.T) {
+	ctx := context.Background()
+
+	testCases := []struct {
+		name                   string
+		discoveryClient        *fake.FakeDiscovery
+		cachedVersion          string
+		cacheStartTime         time.Time
+		cacheTTL               time.Duration
+		wantVersion            string
+		wantDiscoveryCallsMade bool
+	}{
+		{
+			name: "cache miss - no cached version",
+			discoveryClient: &fake.FakeDiscovery{
+				Fake: &k8stesting.Fake{},
+				FakedServerVersion: &version.Info{
+					GitVersion: "v1.28.0",
+				},
+			},
+			cachedVersion:          "",
+			cacheStartTime:         time.Now().Add(-1 * time.Hour), // 1 hour ago
+			cacheTTL:               15 * time.Minute,
+			wantVersion:            "v1.28.0",
+			wantDiscoveryCallsMade: true,
+		},
+		{
+			name: "cache hit - cached version still valid",
+			discoveryClient: &fake.FakeDiscovery{
+				Fake: &k8stesting.Fake{},
+				FakedServerVersion: &version.Info{
+					GitVersion: "v1.28.0",
+				},
+			},
+			cachedVersion:          "v1.27.0",
+			cacheStartTime:         time.Now().Add(-5 * time.Minute), // 5 minutes ago
+			cacheTTL:               15 * time.Minute,
+			wantVersion:            "v1.27.0",
+			wantDiscoveryCallsMade: false,
+		},
+		{
+			name: "cache expired - cached version too old",
+			discoveryClient: &fake.FakeDiscovery{
+				Fake: &k8stesting.Fake{},
+				FakedServerVersion: &version.Info{
+					GitVersion: "v1.28.0",
+				},
+			},
+			cachedVersion:          "v1.27.0",
+			cacheStartTime:         time.Now().Add(-20 * time.Minute), // 20 minutes ago
+			cacheTTL:               1 * time.Minute,
+			wantVersion:            "v1.28.0",
+			wantDiscoveryCallsMade: true,
+		},
+		{
+			name: "cache at TTL boundary - should be expired",
+			discoveryClient: &fake.FakeDiscovery{
+				Fake: &k8stesting.Fake{},
+				FakedServerVersion: &version.Info{
+					GitVersion: "v1.28.0",
+				},
+			},
+			cachedVersion:          "v1.27.0",
+			cacheStartTime:         time.Now().Add(-5*time.Minute - time.Second), // Just over the 5-minute TTL
+			cacheTTL:               5 * time.Minute,
+			wantVersion:            "v1.28.0",
+			wantDiscoveryCallsMade: true,
+		},
+		{
+			name: "discovery client error - no property set",
+			discoveryClient: func() *fake.FakeDiscovery {
+				f := &fake.FakeDiscovery{
+					Fake: &k8stesting.Fake{},
+				}
+				f.AddReactor("get", "version", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, fmt.Errorf("connection refused")
+				})
+				return f
+			}(),
+			cachedVersion:          "",
+			cacheStartTime:         time.Time{},
+			cacheTTL:               15 * time.Minute,
+			wantVersion:            "", // No property should be set
+			wantDiscoveryCallsMade: true,
+		},
+		{
+			name: "cache hit - cached version still valid even if the client errors",
+			discoveryClient: func() *fake.FakeDiscovery {
+				f := &fake.FakeDiscovery{
+					Fake: &k8stesting.Fake{},
+				}
+				f.AddReactor("get", "version", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+					return true, nil, fmt.Errorf("connection refused")
+				})
+				return f
+			}(),
+			cachedVersion:          "v1.27.0",
+			cacheStartTime:         time.Now().Add(-5 * time.Minute), // 5 minutes ago
+			cacheTTL:               15 * time.Minute,
+			wantVersion:            "v1.27.0",
+			wantDiscoveryCallsMade: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Override the package-level TTL for this case and restore it afterwards so that
+			// the cases do not leak state into each other or into other tests.
+			oldTTL := k8sVersionCacheTTL
+			k8sVersionCacheTTL = tc.cacheTTL
+			defer func() { k8sVersionCacheTTL = oldTTL }()
+
+			p := &PropertyProvider{
+				cachedK8sVersion:             tc.cachedVersion,
+				cachedK8sVersionObservedTime: tc.cacheStartTime,
+			}
+			// Only set discoveryClient if it's not nil to avoid interface nil pointer issues.
+			if tc.discoveryClient != nil {
+				p.discoveryClient = tc.discoveryClient
+			}
+
+			properties := make(map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue)
+			p.collectK8sVersion(ctx, properties)
+
+			if tc.wantVersion == "" {
+				// No property should be set.
+				if _, ok := properties[propertyprovider.K8sVersionProperty]; ok {
+					t.Errorf("Expected no Kubernetes version property to be set, but got one")
+				}
+			} else {
+				// Check that the property is set correctly.
+				gotProperty, ok := properties[propertyprovider.K8sVersionProperty]
+				if !ok {
+					t.Fatalf("Expected Kubernetes version property to be set, but it was not")
+				}
+				if gotProperty.Value != tc.wantVersion {
+					t.Errorf("collectK8sVersion() version = %v, want %v", gotProperty.Value, tc.wantVersion)
+				}
+			}
+
+			// Check if the discovery client was called the expected number of times.
+			if tc.discoveryClient != nil {
+				if tc.wantDiscoveryCallsMade && len(tc.discoveryClient.Actions()) == 0 {
+					t.Errorf("Expected discovery client to be called, but it was not")
+				}
+				if !tc.wantDiscoveryCallsMade && len(tc.discoveryClient.Actions()) > 0 {
+					t.Errorf("Expected discovery client not to be called, but it was called %d times", len(tc.discoveryClient.Actions()))
+				}
+			}
+
+			// Verify cache was updated when the discovery client was called successfully.
+			if tc.wantDiscoveryCallsMade && tc.discoveryClient != nil && tc.discoveryClient.FakedServerVersion != nil {
+				if p.cachedK8sVersion != tc.wantVersion {
+					t.Errorf("Expected cached version to be %v, but got %v", tc.wantVersion, p.cachedK8sVersion)
+				}
+				if p.cachedK8sVersionObservedTime.IsZero() {
+					t.Errorf("Expected cache time to be updated, but it was not")
+				}
+			}
+		})
+	}
+}
+
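+// TestCollectK8sVersionConcurrency hammers collectK8sVersion from many goroutines to verify
+// that the mutex-guarded cache is safe for concurrent use; run with -race to get the full
+// benefit of this test.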
+func TestCollectK8sVersionConcurrency(t *testing.T) {
+	ctx := context.Background()
+
+	discoveryClient := &fake.FakeDiscovery{
+		Fake: &k8stesting.Fake{},
+		FakedServerVersion: &version.Info{
+			GitVersion: "v1.28.0",
+		},
+	}
+
+	p := &PropertyProvider{
+		discoveryClient: discoveryClient,
+	}
+
+	// Run multiple concurrent calls to collectK8sVersion.
+	const numGoroutines = 100
+	// Shorten the cache TTL so the test does not depend on the 15-minute default; restore it afterwards.
+	oldTTL := k8sVersionCacheTTL
+	k8sVersionCacheTTL = 1 * time.Second
+	defer func() { k8sVersionCacheTTL = oldTTL }()
+	done := make(chan bool, numGoroutines)
+
+	for i := 0; i < numGoroutines; i++ {
+		go func() {
+			properties := make(map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue)
+			p.collectK8sVersion(ctx, properties)
+			done <- true
+		}()
+	}
+
+	// Wait for all goroutines to complete.
+	for i := 0; i < numGoroutines; i++ {
+		<-done
+	}
+
+	// Verify that the discovery client was called at least once.
+	if len(discoveryClient.Actions()) == 0 {
+		t.Errorf("Expected discovery client to be called at least once, but it was not")
+	}
+
+	// Verify that the cache was populated.
+	if p.cachedK8sVersion != "v1.28.0" {
+		t.Errorf("Expected cached version to be v1.28.0, but got %v", p.cachedK8sVersion)
+	}
+}
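For context on the fakes used above: client-go's `fake.FakeDiscovery` records every `ServerVersion` call as a `get version` action and lets reactors intercept it. A minimal sketch of that behavior in isolation (the version string and error are arbitrary):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/discovery/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	f := &fake.FakeDiscovery{
		Fake:               &k8stesting.Fake{},
		FakedServerVersion: &version.Info{GitVersion: "v1.28.0"},
	}

	// Each ServerVersion call is recorded as an action, which tests can count.
	v, err := f.ServerVersion()
	fmt.Println(v.GitVersion, err, len(f.Actions())) // v1.28.0 <nil> 1

	// A reactor intercepts subsequent calls, e.g. to simulate an unreachable API server.
	f.AddReactor("get", "version", func(a k8stesting.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("connection refused")
	})
	if _, err := f.ServerVersion(); err != nil {
		fmt.Println(err) // connection refused
	}
}
```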
diff --git a/pkg/propertyprovider/commons.go b/pkg/propertyprovider/commons.go
index 2fabae11b..3d69486e4 100644
--- a/pkg/propertyprovider/commons.go
+++ b/pkg/propertyprovider/commons.go
@@ -24,6 +24,15 @@ const (
 	// NodeCountProperty is a property that describes the number of nodes in the cluster.
 	NodeCountProperty = "kubernetes-fleet.io/node-count"
 
+	// K8sVersionProperty is a property that describes the Kubernetes version of the cluster.
+	K8sVersionProperty = "k8s.io/k8s-version"
+
+	// ClusterEntryPointProperty is a property that describes the cluster entry point (API server endpoint).
+	ClusterEntryPointProperty = "k8s.io/cluster-entrypoint"
+
+	// ClusterCertificateAuthorityProperty is a property that describes the cluster's certificate authority data (PEM encoded).
+	ClusterCertificateAuthorityProperty = "k8s.io/cluster-certificate-authority-data"
+
 	// The resource properties.
 	// Total and allocatable CPU resource properties.
 	TotalCPUCapacityProperty = "resources.kubernetes-fleet.io/total-cpu"
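With these constants in place, a consumer of a `PropertyCollectionResponse` looks values up by property name. A hedged sketch (the response here is hand-built for illustration; the field names are those used in the diffs above):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1"
	"github.com/kubefleet-dev/kubefleet/pkg/propertyprovider"
)

func main() {
	resp := propertyprovider.PropertyCollectionResponse{
		Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{
			propertyprovider.K8sVersionProperty: {
				Value:           "v1.28.0",
				ObservationTime: metav1.Now(),
			},
		},
	}

	// Properties are optional; always check presence before use.
	if v, ok := resp.Properties[propertyprovider.K8sVersionProperty]; ok {
		fmt.Printf("cluster runs %s (observed at %s)\n", v.Value, v.ObservationTime)
	}
	if _, ok := resp.Properties[propertyprovider.ClusterCertificateAuthorityProperty]; !ok {
		fmt.Println("no CA data collected")
	}
}
```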
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 7f04bbe52..19e6e46cb 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -228,6 +228,8 @@ func (s *Scheduler) scheduleOnce(ctx context.Context, worker int) {
 	}
 
 	// Requeue if the scheduling cycle suggests so.
+	//nolint:staticcheck
+	//lint:ignore SA1019 Requeue is deprecated, but we need more time to fully migrate to RequeueAfter, as these two fields have so far been used separately.
 	if res.Requeue {
 		if res.RequeueAfter > 0 {
 			s.queue.AddAfter(placementKey, res.RequeueAfter)
diff --git a/pkg/utils/controller/controller.go b/pkg/utils/controller/controller.go
index 566cb5fd2..4dbeb1cd0 100644
--- a/pkg/utils/controller/controller.go
+++ b/pkg/utils/controller/controller.go
@@ -284,6 +284,8 @@ func (w *controller) reconcileHandler(ctx context.Context, key interface{}) {
 		w.queue.Forget(key)
 		w.queue.AddAfter(key, result.RequeueAfter)
 		metrics.FleetReconcileTotal.WithLabelValues(w.name, labelRequeueAfter).Inc()
+	//nolint:staticcheck
+	//lint:ignore SA1019 Requeue is deprecated, but we need more time to fully migrate to RequeueAfter, as these two fields have so far been used separately.
 	case result.Requeue:
 		w.queue.AddRateLimited(key)
 		metrics.FleetReconcileTotal.WithLabelValues(w.name, labelRequeue).Inc()
diff --git a/test/e2e/join_and_leave_test.go b/test/e2e/join_and_leave_test.go
index c7f00259b..5b6e79f0c 100644
--- a/test/e2e/join_and_leave_test.go
+++ b/test/e2e/join_and_leave_test.go
@@ -29,6 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
+	clusterinventory "sigs.k8s.io/cluster-inventory-api/apis/v1alpha1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	fleetnetworkingv1alpha1 "go.goms.io/fleet-networking/api/v1alpha1"
@@ -36,6 +37,7 @@ import (
 	clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1"
 	placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1"
 	"github.com/kubefleet-dev/kubefleet/pkg/utils"
+	"github.com/kubefleet-dev/kubefleet/pkg/utils/controller"
 )
 
 const (
@@ -65,7 +67,6 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"),
 	// Note that this container cannot run in parallel with other containers.
 	Describe("Test member cluster join and leave flow for cluster resource placement", Ordered, Serial, func() {
-
 		BeforeAll(func() {
 			// Create the test resources.
 			wantCRPSelectedResources = []placementv1beta1.ResourceIdentifier{
@@ -442,7 +443,7 @@ var _ = Describe("Test member cluster join and leave flow", Label("joinleave"),
 	})
 })
 
-var _ = Describe("Test member cluster force delete flow", Label("joinleave"), Ordered, Serial, func() {
+var _ = Describe("Test member cluster join and leave without placement", Label("joinleave"), Ordered, Serial, func() {
 	Context("Test cluster join and leave flow with member agent down and force delete member cluster", Label("joinleave"), Ordered, Serial, func() {
 		It("Simulate the member agent going down in member cluster", func() {
 			updateMemberAgentDeploymentReplicas(memberCluster3WestProdClient, 0)
@@ -476,6 +477,71 @@ var _ = Describe("Test member cluster force delete flow", Label("joinleave"), Or
 	})
 })
 
+var _ = Describe("Test member cluster join and leave with clusterProfile", Label("joinleave"), Ordered, Serial, func() {
+	clusterProfileList := &clusterinventory.ClusterProfileList{}
+	Context("Test cluster profile creation and deletion", Label("joinleave"), Ordered, Serial, func() {
+		It("Make sure we have all the cluster profiles", func() {
+			Eventually(func() error {
+				if err := hubClient.List(ctx, clusterProfileList, &client.ListOptions{Namespace: utils.FleetSystemNamespace}); err != nil {
+					return fmt.Errorf("failed to list cluster profiles: %w", err)
+				}
+
+				// Create a map for easy lookup.
+				cpMap := make(map[string]clusterinventory.ClusterProfile)
+				for idx := range clusterProfileList.Items {
+					cp := clusterProfileList.Items[idx]
+					cpMap[cp.Name] = cp
+				}
+				// Make sure all the member clusters have a cluster profile.
+				for idx := range allMemberClusterNames {
+					cp, ok := cpMap[allMemberClusterNames[idx]]
+					if !ok {
+						return fmt.Errorf("cluster profile for member cluster %s not found", allMemberClusterNames[idx])
+					}
+					if cp.Status.Version.Kubernetes == "" {
+						return fmt.Errorf("cluster profile %s Kubernetes version should not be empty", cp.Name)
+					}
+					if len(cp.Status.AccessProviders) != 1 {
+						return fmt.Errorf("cluster profile %s should have exactly one access provider, got %+v", cp.Name, cp.Status.AccessProviders)
+					}
+					if cp.Status.AccessProviders[0].Name != controller.ClusterManagerName {
+						return fmt.Errorf("cluster profile %s access provider name %s doesn't match expected %s", cp.Name, cp.Status.AccessProviders[0].Name, controller.ClusterManagerName)
+					}
+					if len(cp.Status.AccessProviders[0].Cluster.CertificateAuthorityData) == 0 {
+						return fmt.Errorf("cluster profile %s access provider certificate authority data should not be empty", cp.Name)
+					}
+				}
+				return nil
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to verify cluster profiles")
+		})
+
+		It("Delete the member cluster CR to simulate the member leaving", func() {
+			markMemberClusterAsLeft(memberCluster3WestProdName)
+		})
+
+		It("Make sure the corresponding cluster profile is deleted", func() {
+			Eventually(func() error {
+				if err := hubClient.List(ctx, clusterProfileList, &client.ListOptions{Namespace: utils.FleetSystemNamespace}); err != nil {
+					return fmt.Errorf("failed to list cluster profiles: %w", err)
+				}
+				for idx := range clusterProfileList.Items {
+					cp := clusterProfileList.Items[idx]
+					if cp.Name == memberCluster3WestProdName {
+						return fmt.Errorf("cluster profile for member cluster %s should be deleted", memberCluster3WestProdName)
+					}
+				}
+				return nil
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to verify cluster profiles")
+		})
+	})
+
+	AfterAll(func() {
+		By("Add the member cluster back")
cluster back") + createMemberCluster(memberCluster3WestProd.ClusterName, memberCluster3WestProd.PresentingServiceAccountInHubClusterName, labelsByClusterName[memberCluster3WestProd.ClusterName], annotationsByClusterName[memberCluster3WestProd.ClusterName]) + checkIfMemberClusterHasJoined(memberCluster3WestProd) + }) +}) + func updateMemberAgentDeploymentReplicas(clusterClient client.Client, replicas int32) { Eventually(func() error { var d appsv1.Deployment diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index a80b20081..36df25cf1 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -85,7 +85,7 @@ func createMemberCluster(name, svcAccountName string, labels, annotations map[st HeartbeatPeriodSeconds: memberClusterHeartbeatPeriodSeconds, }, } - Expect(hubClient.Create(ctx, mcObj)).To(Succeed(), "Failed to create member cluster object %s", name) + Expect(hubClient.Create(ctx, mcObj)).To(SatisfyAny(&utils.AlreadyExistMatcher{}, Succeed()), "Failed to create member cluster object %s", name) } func updateMemberClusterDeleteOptions(name string, deleteOptions *clusterv1beta1.DeleteOptions) { @@ -312,10 +312,15 @@ func checkIfAzurePropertyProviderIsWorking() { ignoreCostProperties := cmpopts.IgnoreMapEntries(func(k clusterv1beta1.PropertyName, v clusterv1beta1.PropertyValue) bool { return k == azure.PerCPUCoreCostProperty || k == azure.PerGBMemoryCostProperty }) + // we don't know the exact value of k8s version and cluster entry point + ignoreClusterProperties := cmpopts.IgnoreMapEntries(func(k clusterv1beta1.PropertyName, v clusterv1beta1.PropertyValue) bool { + return k == propertyprovider.K8sVersionProperty || k == propertyprovider.ClusterCertificateAuthorityProperty + }) if diff := cmp.Diff( mcObj.Status.Properties, wantStatus.Properties, ignoreTimeTypeFields, ignoreCostProperties, + ignoreClusterProperties, ); diff != "" { return fmt.Errorf("member cluster status properties diff (-got, +want):\n%s", diff) } @@ -576,7 +581,7 @@ func cleanupInvalidClusters() { } Eventually(func() error { err := hubClient.Get(ctx, types.NamespacedName{Name: name}, mcObj) - if err != nil { + if err != nil && !k8serrors.IsNotFound(err) { return err } mcObj.Finalizers = []string{}