diff --git a/.github/PULL_REQUEST_TEMPLATE/breaking_change.md b/.github/PULL_REQUEST_TEMPLATE/breaking_change.md new file mode 100644 index 0000000000..b15c979a17 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/breaking_change.md @@ -0,0 +1,5 @@ + + + + + diff --git a/.github/PULL_REQUEST_TEMPLATE/bug_fix.md b/.github/PULL_REQUEST_TEMPLATE/bug_fix.md new file mode 100644 index 0000000000..84c58c71e1 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/bug_fix.md @@ -0,0 +1,5 @@ + + + + + diff --git a/.github/PULL_REQUEST_TEMPLATE/compat_feature.md b/.github/PULL_REQUEST_TEMPLATE/compat_feature.md new file mode 100644 index 0000000000..3ad61bba83 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/compat_feature.md @@ -0,0 +1,3 @@ + + + diff --git a/.github/PULL_REQUEST_TEMPLATE/docs.md b/.github/PULL_REQUEST_TEMPLATE/docs.md new file mode 100644 index 0000000000..c99ec61c2d --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/docs.md @@ -0,0 +1,3 @@ + + + diff --git a/.github/PULL_REQUEST_TEMPLATE/other.md b/.github/PULL_REQUEST_TEMPLATE/other.md new file mode 100644 index 0000000000..eec7cbf6ee --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/other.md @@ -0,0 +1,3 @@ + + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..f6b8370e57 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,4 @@ + + + + diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000..f37cf5fcdc --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,23 @@ +name: golangci-lint +on: + pull_request: + types: [opened, edited, synchronize, reopened] + branches: + - main + - master +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + strategy: + matrix: + working-directory: + - "" + - tools/setup-envtest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.49.0 + working-directory: ${{matrix.working-directory}} diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml new file mode 100644 index 0000000000..caa342f44b --- /dev/null +++ b/.github/workflows/verify.yml @@ -0,0 +1,14 @@ +on: + pull_request_target: + types: [opened, edited, reopened, synchronize] + +jobs: + verify: + runs-on: ubuntu-latest + name: verify PR contents + steps: + - name: Verifier action + id: verifier + uses: kubernetes-sigs/kubebuilder-release-tools@v0.1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..c2c72faf34 --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ + +# Vscode files +.vscode + +# Tools binaries. 
+hack/tools/bin diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000..7d1d3665ce --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,143 @@ +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - deadcode + - depguard + - dogsled + - errcheck + - errorlint + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ifshort + - importas + - ineffassign + - misspell + - nakedret + - nilerr + - nolintlint + - prealloc + - revive + - rowserrcheck + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + +linters-settings: + ifshort: + # Maximum length of variable declaration measured in number of characters, after which linter won't suggest using short syntax. + max-decl-chars: 50 + importas: + no-unaliased: true + alias: + # Kubernetes + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + staticcheck: + go: "1.19" + stylecheck: + go: "1.19" + depguard: + include-go-root: true + packages: + - io/ioutil # https://go.dev/doc/go1.16#ioutil + +issues: + max-same-issues: 0 + max-issues-per-linter: 0 + # We are disabling default golangci exclusions because we want to help reviewers to focus on reviewing the most relevant + # changes in PRs and avoid nitpicking. + exclude-use-default: false + # List of regexps of issue texts to exclude, empty list by default. + exclude: + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. + - Subprocess launch(ed with variable|ing should be audited) + - (G204|G104|G307) + - "ST1000: at least one file in a package should have a package comment" + exclude-rules: + - linters: + - gosec + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - linters: + - revive + text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - revive + source: _ "embed" + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + path: fake_\.go + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega or ginkgo are allowed + # within test files. 
+ - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: _test\.go + text: "G107: Potential HTTP request made with variable url" + # Append should be able to assign to a different var/slice. + - linters: + - gocritic + text: "appendAssign: append result not assigned to the same slice" + - linters: + - gocritic + text: "singleCaseSwitch: should rewrite switch statement to if statement" + # It considers all file access to a filename that comes from a variable problematic, + # which is naiv at best. + - linters: + - gosec + text: "G304: Potential file inclusion via variable" + - linters: + - revive + text: "package-comments: should have a package comment" + +run: + timeout: 10m + skip-files: + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" + allow-parallel-runners: true diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..2c0ea1f667 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing guidelines + +## Sign the CLA + +Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests. + +Please see https://git.k8s.io/community/CLA.md for more info + +## Contributing steps + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +## Test locally + +Run the command `make test` to test the changes locally. diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 0000000000..c21b29e287 --- /dev/null +++ b/FAQ.md @@ -0,0 +1,81 @@ +# FAQ + +### Q: How do I know which type of object a controller references? + +**A**: Each controller should only reconcile one object type. Other +affected objects should be mapped to a single type of root object, using +the `EnqueueRequestForOwner` or `EnqueueRequestsFromMapFunc` event +handlers, and potentially indices. Then, your Reconcile method should +attempt to reconcile *all* state for that given root objects. + +### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)? + +**A**: You should not. Reconcile functions should be idempotent, and +should always reconcile state by reading all the state it needs, then +writing updates. This allows your reconciler to correctly respond to +generic events, adjust to skipped or coalesced events, and easily deal +with application startup. The controller will enqueue reconcile requests +for both old and new objects if a mapping changes, but it's your +responsibility to make sure you have enough information to be able clean +up state that's no longer referenced. + +### Q: My cache might be stale if I read from a cache! How should I deal with that? + +**A**: There are several different approaches that can be taken, depending +on your situation. + +- When you can, take advantage of optimistic locking: use deterministic + names for objects you create, so that the Kubernetes API server will + warn you if the object already exists. Many controllers in Kubernetes + take this approach: the StatefulSet controller appends a specific number + to each pod that it creates, while the Deployment controller hashes the + pod template spec and appends that. 
+ +- In the few cases when you cannot take advantage of deterministic names + (e.g. when using generateName), it may be useful in to track which + actions you took, and assume that they need to be repeated if they don't + occur after a given time (e.g. using a requeue result). This is what + the ReplicaSet controller does. + +In general, write your controller with the assumption that information +will eventually be correct, but may be slightly out of date. Make sure +that your reconcile function enforces the entire state of the world each +time it runs. If none of this works for you, you can always construct +a client that reads directly from the API server, but this is generally +considered to be a last resort, and the two approaches above should +generally cover most circumstances. + +### Q: Where's the fake client? How do I use it? + +**A**: The fake client +[exists](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake), +but we generally recommend using +[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) +to test against a real API server. In our experience, tests using fake +clients gradually re-implement poorly-written impressions of a real API +server, which leads to hard-to-maintain, complex test code. + +### Q: How should I write tests? Any suggestions for getting started? + +- Use the aforementioned + [envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) + to spin up a real API server instead of trying to mock one out. + +- Structure your tests to check that the state of the world is as you + expect it, *not* that a particular set of API calls were made, when + working with Kubernetes APIs. This will allow you to more easily + refactor and improve the internals of your controllers without changing + your tests. + +- Remember that any time you're interacting with the API server, changes + may have some delay between write time and reconcile time. + +### Q: What are these errors about no Kind being registered for a type? + +**A**: You're probably missing a fully-set-up Scheme. Schemes record the +mapping between Go types and group-version-kinds in Kubernetes. In +general, your application should have its own Scheme containing the types +from the API groups that it needs (be they Kubernetes types or your own). +See the [scheme builder +docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/scheme) for +more information. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..36647c697f --- /dev/null +++ b/Makefile @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +# Copyright 2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# If you update this file, please follow +# https://suva.sh/posts/well-documented-makefiles + +## -------------------------------------- +## General +## -------------------------------------- + +SHELL:=/usr/bin/env bash +.DEFAULT_GOAL:=help + +# Use GOPROXY environment variable if set +GOPROXY := $(shell go env GOPROXY) +ifeq ($(GOPROXY),) +GOPROXY := https://proxy.golang.org +endif +export GOPROXY + +# Active module mode, as we use go modules to manage dependencies +export GO111MODULE=on + +# Tools. +TOOLS_DIR := hack/tools +TOOLS_BIN_DIR := $(TOOLS_DIR)/bin +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint) +GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen +ENVTEST_DIR := $(abspath tools/setup-envtest) + +# The help will print out all targets with their descriptions organized bellow their categories. The categories are represented by `##@` and the target descriptions by `##`. +# The awk commands is responsible to read the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category. +# More info over the usage of ANSI control characters for terminal formatting: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info over awk command: http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +## -------------------------------------- +## Testing +## -------------------------------------- + +.PHONY: test +test: test-tools ## Run the script check-everything.sh which will check all. + TRACE=1 ./hack/check-everything.sh + +.PHONY: test-tools +test-tools: ## tests the tools codebase (setup-envtest) + cd tools/setup-envtest && go test ./... + +## -------------------------------------- +## Binaries +## -------------------------------------- + +$(GO_APIDIFF): $(TOOLS_DIR)/go.mod # Build go-apidiff from tools folder. + cd $(TOOLS_DIR) && go build -tags=tools -o bin/go-apidiff github.com/joelanford/go-apidiff + +$(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. + cd $(TOOLS_DIR) && go build -tags=tools -o bin/controller-gen sigs.k8s.io/controller-tools/cmd/controller-gen + +$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golanci-lint using hack script into tools folder. 
+ hack/ensure-golangci-lint.sh \ + -b $(TOOLS_BIN_DIR) \ + $(shell cat .github/workflows/golangci-lint.yml | grep version | sed 's/.*version: //') + +## -------------------------------------- +## Linting +## -------------------------------------- + +.PHONY: lint +lint: $(GOLANGCI_LINT) ## Lint codebase + $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + cd tools/setup-envtest; $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + +.PHONY: lint-fix +lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter. + GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint + +## -------------------------------------- +## Generate +## -------------------------------------- + +.PHONY: modules +modules: ## Runs go mod to ensure modules are up to date. + go mod tidy + cd $(TOOLS_DIR); go mod tidy + cd $(ENVTEST_DIR); go mod tidy + +.PHONY: generate +generate: $(CONTROLLER_GEN) ## Runs controller-gen for internal types for config file + $(CONTROLLER_GEN) object paths="./pkg/config/v1alpha1/...;./examples/configfile/custom/v1alpha1/..." + +## -------------------------------------- +## Cleanup / Verification +## -------------------------------------- + +.PHONY: clean +clean: ## Cleanup. + $(MAKE) clean-bin + +.PHONY: clean-bin +clean-bin: ## Remove all generated binaries. + rm -rf hack/tools/bin + +.PHONY: verify-modules +verify-modules: modules + @if !(git diff --quiet HEAD -- go.sum go.mod); then \ + echo "go module files are out of date, please run 'make modules'"; exit 1; \ + fi diff --git a/OWNERS b/OWNERS new file mode 100644 index 0000000000..4b1fa044bf --- /dev/null +++ b/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: + - controller-runtime-admins + - controller-runtime-maintainers + - controller-runtime-approvers +reviewers: + - controller-runtime-admins + - controller-runtime-reviewers + - controller-runtime-approvers diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES new file mode 100644 index 0000000000..82f7a2bef4 --- /dev/null +++ b/OWNERS_ALIASES @@ -0,0 +1,41 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +aliases: + # active folks who can be contacted to perform admin-related + # tasks on the repo, or otherwise approve any PRS. 
+ controller-runtime-admins: + - droot + - mengqiy + - pwittrock + + # non-admin folks who have write-access and can approve any PRs in the repo + controller-runtime-maintainers: + - vincepri + - joelanford + + # non-admin folks who can approve any PRs in the repo + controller-runtime-approvers: + - gerred + - shawn-hurley + - alvaroaleman + + # folks who can review and LGTM any PRs in the repo (doesn't + # include approvers & admins -- those count too via the OWNERS + # file) + controller-runtime-reviewers: + - alenkacz + - vincepri + - alexeldeib + - varshaprasad96 + - fillzpp + + # folks to can approve things in the directly-ported + # testing_frameworks portions of the codebase + testing-integration-approvers: + - apelisse + - hoegaarden + + # folks who may have context on ancient history, + # but are no longer directly involved + controller-runtime-emeritus-maintainers: + - directxman12 diff --git a/README.md b/README.md new file mode 100644 index 0000000000..cd358b94f9 --- /dev/null +++ b/README.md @@ -0,0 +1,66 @@ +[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/controller-runtime)](https://goreportcard.com/report/sigs.k8s.io/controller-runtime) +[![godoc](https://pkg.go.dev/badge/sigs.k8s.io/controller-runtime)](https://pkg.go.dev/sigs.k8s.io/controller-runtime) + +# Kubernetes controller-runtime Project + +The Kubernetes controller-runtime Project is a set of go libraries for building +Controllers. It is leveraged by [Kubebuilder](https://book.kubebuilder.io/) and +[Operator SDK](https://github.com/operator-framework/operator-sdk). Both are +a great place to start for new projects. See +[Kubebuilder's Quick Start](https://book.kubebuilder.io/quick-start.html) to +see how it can be used. + +Documentation: + +- [Package overview](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg) +- [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder) +- [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New) +- [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) +- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/master/designs) + +# Versioning, Maintenance, and Compatibility + +The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR: + +Users: + +- We follow [Semantic Versioning (semver)](https://semver.org) +- Use releases with your dependency management to ensure that you get compatible code +- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) + +Contributors: + +- All code PR must be labeled with :bug: (patch fixes), :sparkles: (backwards-compatible features), or :warning: (breaking changes) +- Breaking changes will find their way into the next major release, other changes will go into an semi-immediate patch or minor release +- For a quick PR template suggesting the right information, use one of these PR templates: + * [Breaking Changes/Features](/.github/PULL_REQUEST_TEMPLATE/breaking_change.md) + * [Backwards-Compatible Features](/.github/PULL_REQUEST_TEMPLATE/compat_feature.md) + * [Bug fixes](/.github/PULL_REQUEST_TEMPLATE/bug_fix.md) + * [Documentation Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md) + * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md) + +## FAQ + +See [FAQ.md](FAQ.md) + +## 
Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+controller-runtime is a subproject of the [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) project
+in sig apimachinery.
+
+You can reach the maintainers of this project at:
+
+- Slack channel: [#kubebuilder](http://slack.k8s.io/#kubebuilder)
+- Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder)
+
+## Contributing
+Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers.
+The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details.
+Before starting any work, please either comment on an existing issue, or file a new one.
+
+## Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000000..134a73a31b
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,47 @@
+# Release Process
+
+The Kubernetes controller-runtime Project is released on an as-needed basis. The process is as follows:
+
+**Note:** Releases are done from the `release-MAJOR.MINOR` branches. For PATCH releases it is not required
+to create a new branch; you just need to ensure that all bug fixes are cherry-picked into the respective
+`release-MAJOR.MINOR` branch. To learn more about versioning, see https://semver.org/.
+
+## How to do a release
+
+### Create the new branch and the release tag
+
+1. Create a new branch `git checkout -b release-` from master
+2. Push the new branch to the remote repository
+
+### Now, let's generate the changelog
+
+1. Create the changelog from the new branch `release-` (`git checkout release-`).
+You will need to use the [kubebuilder-release-tools][kubebuilder-release-tools] to generate the notes. See [here][release-notes-generation].
+
+> **Note**
+> - You will need to have the previous branch checked out locally from the remote repository
+> - Also, ensure that you fetch all tags from the remote: `git fetch --all --tags`
+
+### Draft a new release from GitHub
+
+1. Create a new tag with the correct version from the new `release-` branch
+2. Add the changelog on it and publish. Now the source code is released!
+
+### Add a new Prow test for the new release branch
+
+1. Create a new Prow test under [github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime)
+for the new `release-` branch. (e.g. for the `0.11.0` release see the PR: https://github.com/kubernetes/test-infra/pull/25205)
+2. Ping the infra PR in the controller-runtime Slack channel for reviews.
+
+### Announce the new release:
+
+1. Announce the new release in the Slack channel, e.g.:
+
+````
+:announce: Controller-Runtime v0.12.0 has been released!
+This release includes a Kubernetes dependency bump to v1.24.
+For more info, see the release page: https://github.com/kubernetes-sigs/controller-runtime/releases.
+ :tada: Thanks to all our contributors!
+````
+
+2. 
An announcement email is sent to `kubebuilder@googlegroups.com` with the subject `[ANNOUNCE] Controller-Runtime $VERSION is released`.
diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS
new file mode 100644
index 0000000000..32e6a3b904
--- /dev/null
+++ b/SECURITY_CONTACTS
@@ -0,0 +1,14 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+pwittrock
+droot
diff --git a/TMP-LOGGING.md b/TMP-LOGGING.md
new file mode 100644
index 0000000000..b3cfc66517
--- /dev/null
+++ b/TMP-LOGGING.md
@@ -0,0 +1,169 @@
+Logging Guidelines
+==================
+
+controller-runtime uses a kind of logging called *structured logging*. If
+you've used a library like Zap or logrus before, you'll be familiar with
+the concepts we use. If you've only used a logging library like the "log"
+package (in the Go standard library) or "glog" (in Kubernetes), you'll
+need to adjust how you think about logging a bit.
+
+### Getting Started With Structured Logging
+
+With structured logging, we associate a *constant* log message with some
+variable key-value pairs. For instance, suppose we wanted to log that we
+were starting reconciliation on a pod. In the Go standard library logger,
+we might write:
+
+```go
+log.Printf("starting reconciliation for pod %s/%s", podNamespace, podName)
+```
+
+In controller-runtime, we'd instead write:
+
+```go
+logger.Info("starting reconciliation", "pod", req.NamespacedName)
+```
+
+or even write
+
+```go
+func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+  logger := logger.WithValues("pod", req.NamespacedName)
+  // do some stuff
+  logger.Info("starting reconciliation")
+}
+```
+
+Notice how we've broken out the information that we want to convey into
+a constant message (`"starting reconciliation"`) and some key-value pairs
+that convey variable information (`"pod", req.NamespacedName`). We've
+thereby added "structure" to our logs, which makes them easier to save
+and search later, as well as correlate with metrics and events.
+
+All of controller-runtime's logging is done via
+[logr](https://github.com/go-logr/logr), a generic interface for
+structured logging. You can use whichever logging library you want to
+implement the actual mechanics of the logging. controller-runtime
+provides some helpers to make it easy to use
+[Zap](https://go.uber.org/zap) as the implementation.
+
+You can configure the logging implementation using
+`"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That
+package also contains the convenience functions for setting up Zap.
+
+You can get a handle to the "root" logger using
+`"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call
+`WithName` to create individual named loggers. You can call `WithName`
+repeatedly to chain names together:
+
+```go
+logger := log.Log.WithName("controller").WithName("replicaset")
+// in reconcile...
+logger = logger.WithValues("replicaset", req.NamespacedName)
+// later on in reconcile...
+logger.Info("doing things with pods", "pod", newPod) +``` + +As seen above, you can also call `WithValue` to create a new sub-logger +that always attaches some key-value pairs to a logger. + +Finally, you can use `V(1)` to mark a particular log line as "debug" logs: + +```go +logger.V(1).Info("this is particularly verbose!", "state of the world", +allKubernetesObjectsEverywhere) +``` + +While it's possible to use higher log levels, it's recommended that you +stick with `V(1)` or `V(0)` (which is equivalent to not specifying `V`), +and then filter later based on key-value pairs or messages; different +numbers tend to lose meaning easily over time, and you'll be left +wondering why particular logs lines are at `V(5)` instead of `V(7)`. + +## Logging errors + +Errors should *always* be logged with `log.Error`, which allows logr +implementations to provide special handling of errors (for instance, +providing stack traces in debug mode). + +It's acceptable to log call `log.Error` with a nil error object. This +conveys that an error occurred in some capacity, but that no actual +`error` object was involved. + +Errors returned by the `Reconcile` implementation of the `Reconciler` interface are commonly logged as a `Reconciler error`. +It's a developer choice to create an additional error log in the `Reconcile` implementation so a more specific file name and line for the error are returned. + +## Logging messages + +- Don't put variable content in your messages -- use key-value pairs for + that. Never use `fmt.Sprintf` in your message. + +- Try to match the terminology in your messages with your key-value pairs + -- for instance, if you have a key-value pairs `api version`, use the + term `APIVersion` instead of `GroupVersion` in your message. + +## Logging Kubernetes Objects + +Kubernetes objects should be logged directly, like `log.Info("this is +a Kubernetes object", "pod", somePod)`. controller-runtime provides +a special encoder for Zap that will transform Kubernetes objects into +`name, namespace, apiVersion, kind` objects, when available and not in +development mode. Other logr implementations should implement similar +logic. + +## Logging Structured Values (Key-Value pairs) + +- Use lower-case, space separated keys. For example `object` for objects, + `api version` for `APIVersion` + +- Be consistent across your application, and with controller-runtime when + possible. + +- Try to be brief but descriptive. + +- Match terminology in keys with terminology in the message. + +- Be careful logging non-Kubernetes objects verbatim if they're very + large. + +### Groups, Versions, and Kinds + +- Kinds should not be logged alone (they're meaningless alone). Use + a `GroupKind` object to log them instead, or a `GroupVersionKind` when + version is relevant. + +- If you need to log an API version string, use `api version` as the key + (formatted as with a `GroupVersion`, or as received directly from API + discovery). + +### Objects and Types + +- If code works with a generic Kubernetes `runtime.Object`, use the + `object` key. For specific objects, prefer the resource name as the key + (e.g. `pod` for `v1.Pod` objects). + +- For non-Kubernetes objects, the `object` key may also be used, if you + accept a generic interface. 
+
+- When logging a raw type, log it using the `type` key, with a value of
+  `fmt.Sprintf("%T", typ)`
+
+- If there's specific context around a type, the key may be more specific,
+  but should end with `type` -- for instance, `OwnerType` should be logged
+  as `owner` in the context of `log.Error(err, "Could not get ObjectKinds
+  for OwnerType", `owner type`, fmt.Sprintf("%T"))`. When possible, favor
+  communicating kind instead.
+
+### Multiple things
+
+- When logging multiple things, simply pluralize the key.
+
+### controller-runtime Specifics
+
+- Reconcile requests should be logged as `request`, although normal code
+  should favor logging the key.
+
+- Reconcile keys should be logged with the same key as if you were
+  logging the object directly (e.g. `log.Info("reconciling pod", "pod",
+  req.NamespacedName)`). This ends up having a similar effect to logging
+  the object directly.
diff --git a/VERSIONING.md b/VERSIONING.md
new file mode 100644
index 0000000000..18779000ec
--- /dev/null
+++ b/VERSIONING.md
@@ -0,0 +1,30 @@
+# Versioning and Branching in controller-runtime
+
+We follow the [common KubeBuilder versioning guidelines][guidelines], and
+use the corresponding tooling.
+
+For the purposes of the aforementioned guidelines, controller-runtime
+counts as a "library project", but otherwise follows the guidelines
+exactly.
+
+[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md
+
+## Compatibility and Release Support
+
+For release branches, we generally tend to support backporting one (1)
+major release (`release-{X-1}` or `release-0.{Y-1}`), but may go back
+further if the need arises and is very pressing (e.g. security updates).
+
+### Dependency Support
+
+Note the [guidelines on dependency versions][dep-versions]. Particularly:
+
+- We **DO** guarantee Kubernetes REST API compatibility -- if a given
+  version of controller-runtime stops working with what should be
+  a supported version of Kubernetes, this is almost certainly a bug.
+
+- We **DO NOT** guarantee any particular compatibility matrix between
+  kubernetes library dependencies (client-go, apimachinery, etc); such
+  compatibility is infeasible due to the way those libraries are versioned.
+
+[dep-versions]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md#kubernetes-version-compatibility
diff --git a/alias.go b/alias.go
new file mode 100644
index 0000000000..29f964dcbe
--- /dev/null
+++ b/alias.go
@@ -0,0 +1,150 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controllerruntime + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client/config" + cfg "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Builder builds an Application ControllerManagedBy (e.g. Operator) and returns a manager.Manager to start it. +type Builder = builder.Builder + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request = reconcile.Request + +// Result contains the result of a Reconciler invocation. +type Result = reconcile.Result + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager = manager.Manager + +// Options are the arguments for creating a new Manager. +type Options = manager.Options + +// SchemeBuilder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type SchemeBuilder = scheme.Builder + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion = schema.GroupVersion + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types. +type GroupResource = schema.GroupResource + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +type TypeMeta = metav1.TypeMeta + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta = metav1.ObjectMeta + +var ( + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Will log an error and exit if there is an error creating the rest.Config. + GetConfigOrDie = config.GetConfigOrDie + + // GetConfig creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Config precedence + // + // * --kubeconfig flag pointing at a file + // + // * KUBECONFIG environment variable pointing at a file + // + // * In-cluster config if running in cluster + // + // * $HOME/.kube/config if exists. + GetConfig = config.GetConfig + + // ConfigFile returns the cfg.File function for deferred config file loading, + // this is passed into Options{}.From() to populate the Options fields for + // the manager. + ConfigFile = cfg.File + + // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager. 
+ NewControllerManagedBy = builder.ControllerManagedBy + + // NewWebhookManagedBy returns a new webhook builder that will be started by the provided Manager. + NewWebhookManagedBy = builder.WebhookManagedBy + + // NewManager returns a new Manager for creating Controllers. + NewManager = manager.New + + // CreateOrUpdate creates or updates the given object obj in the Kubernetes + // cluster. The object's desired state should be reconciled with the existing + // state using the passed in ReconcileFn. obj must be a struct pointer so that + // obj can be updated with the content returned by the Server. + // + // It returns the executed operation and an error. + CreateOrUpdate = controllerutil.CreateOrUpdate + + // SetControllerReference sets owner as a Controller OwnerReference on owned. + // This is used for garbage collection of the owned object and for + // reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). + // Since only one OwnerReference can be a controller, it returns an error if + // there is another OwnerReference with Controller flag set. + SetControllerReference = controllerutil.SetControllerReference + + // SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned + // which is closed on one of these signals. If a second signal is caught, the program + // is terminated with exit code 1. + SetupSignalHandler = signals.SetupSignalHandler + + // Log is the base logger used by controller-runtime. It delegates + // to another logr.Logger. You *must* call SetLogger to + // get any actual logging. + Log = log.Log + + // LoggerFrom returns a logger with predefined values from a context.Context. + // The logger, when used with controllers, can be expected to contain basic information about the object + // that's being reconciled like: + // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. + // - `name` and `namespace` injected from the reconciliation request. + // + // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. + LoggerFrom = log.FromContext + + // LoggerInto takes a context and sets the logger as one of its keys. + // + // This is meant to be used in reconcilers to enrich the logger within a context with additional values. + LoggerInto = log.IntoContext + + // SetLogger sets a concrete logging implementation for all deferred Loggers. + SetLogger = log.SetLogger +) diff --git a/code-of-conduct.md b/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/designs/README.md b/designs/README.md new file mode 100644 index 0000000000..bf8b5000a9 --- /dev/null +++ b/designs/README.md @@ -0,0 +1,36 @@ +Designs +======= + +These are the design documents for changes to Controller Runtime. They +exist to help document the design processes that go into writing +Controller Runtime, but may not be up-to-date (more below). + +Not all changes to Controller Runtime need a design document -- only major +ones. Use your best judgement. 
+ +When submitting a design document, we encourage having written +a proof-of-concept, and it's perfectly acceptable to submit the +proof-of-concept PR simultaneously with the design document, as the +proof-of-concept process can help iron out wrinkles and can help with the +`Example` section of the template. + +## Out-of-Date Designs + +**Controller Runtime documentation +[GoDoc](https://pkg.go.dev/sigs.k8s.io/controller-runtime) should be +considered the canonical, update-to-date reference and architectural +documentation** for Controller Runtime. + +However, if you see an out-of-date design document, feel free to submit +a PR marking it as such, and add an addendum linking to issues documenting +why things changed. For example: + +```markdown + +# Out of Date + +This change is out of date. It turns out curly braces are frustrating to +type, so we had to abandon functions entirely, and have users specify +custom functionality using strings of Common LISP instead. See #000 for +more information. +``` diff --git a/designs/component-config.md b/designs/component-config.md new file mode 100644 index 0000000000..8aebec4f96 --- /dev/null +++ b/designs/component-config.md @@ -0,0 +1,320 @@ +# ComponentConfig Controller Runtime Support +Author: @christopherhein + +Last Updated on: 03/02/2020 + +## Table of Contents + + + * [ComponentConfig Controller Runtime Support](#componentconfig-controller-runtime-support) + * [Table of Contents](#table-of-contents) + * [Summary](#summary) + * [Motivation](#motivation) + * [Links to Open Issues](#links-to-open-issues) + * [Goals](#goals) + * [Non-Goals/Future Work](#non-goalsfuture-work) + * [Proposal](#proposal) + * [ComponentConfig Load Order](#componentconfig-load-order) + * [Embeddable ComponentConfig Type](#embeddable-componentconfig-type) + * [Default ComponentConfig Type](#default-componentconfig-type) + * [Using Flags w/ ComponentConfig](#using-flags-w-componentconfig) + * [Kubebuilder Scaffolding Example](#kubebuilder-scaffolding-example) + * [User Stories](#user-stories) + * [Controller Author with controller-runtime and default type](#controller-author-with-controller-runtime-and-default-type) + * [Controller Author with controller-runtime and custom type](#controller-author-with-controller-runtime-and-custom-type) + * [Controller Author with kubebuilder (tbd proposal for kubebuilder)](#controller-author-with-kubebuilder-tbd-proposal-for-kubebuilder) + * [Controller User without modifications to config](#controller-user-without-modifications-to-config) + * [Controller User with modifications to config](#controller-user-with-modifications-to-config) + * [Risks and Mitigations](#risks-and-mitigations) + * [Alternatives](#alternatives) + * [Implementation History](#implementation-history) + + + +## Summary + +Currently controllers that use `controller-runtime` need to configure the `ctrl.Manager` by using flags or hardcoding values into the initialization methods. Core Kubernetes has started to move away from using flags as a mechanism for configuring components and standardized on [`ComponentConfig` or Versioned Component Configuration Files](https://docs.google.com/document/d/1FdaEJUEh091qf5B98HM6_8MS764iXrxxigNIdwHYW9c/edit). This proposal is to bring `ComponentConfig` to `controller-runtime` to allow controller authors to make `go` types backed by `apimachinery` to unmarshal and configure the `ctrl.Manager{}` reducing the flags and allowing code based tools to easily configure controllers instead of requiring them to mutate CLI args. 
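+
+For illustration, a minimal sketch of the status quo this proposal aims to replace -- configuring the `ctrl.Manager` through flags -- might look like the following (the flag names and variables here are only examples and assume a `scheme` variable, as in the samples later in this document; they are not part of the proposal):
+
+```golang
+var metricsAddr string
+var enableLeaderElection bool
+flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metrics endpoint binds to.")
+flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for the controller manager.")
+flag.Parse()
+
+// Every option is wired up by hand from a flag into ctrl.Options.
+mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
+    Scheme:             scheme,
+    MetricsBindAddress: metricsAddr,
+    LeaderElection:     enableLeaderElection,
+})
+if err != nil {
+    // ...
+}
+```
+
+Each new option added this way means another flag to plumb through and document, which is the boilerplate the sections below aim to remove.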
+ +## Motivation + +This change is important because: +- it will help make it easier for controllers to be configured by other machine processes +- it will reduce the required flags required to start a controller +- allow for configuration types which aren't natively supported by flags +- allow using and upgrading older configurations avoiding breaking changes in flags + +### Links to Open Issues + +- [#518 Provide a ComponentConfig to tweak the Manager](https://github.com/kubernetes-sigs/controller-runtime/issues/518) +- [#207 Reduce command line flag boilerplate](https://github.com/kubernetes-sigs/controller-runtime/issues/207) +- [#722 Implement ComponentConfig by default & stop using (most) flags](https://github.com/kubernetes-sigs/kubebuilder/issues/722) + +### Goals + +- Provide an interface for pulling configuration data out of exposed `ComponentConfig` types (see below for implementation) +- Provide a new `ctrl.NewFromComponentConfig()` function for initializing a manager +- Provide an embeddable `ControllerManagerConfiguration` type for easily authoring `ComponentConfig` types +- Provide an `DefaultControllerConfig` to make the switching easier for clients + +### Non-Goals/Future Work + +- `kubebuilder` implementation and design in another PR +- Changing the default `controller-runtime` implementation +- Dynamically reloading `ComponentConfig` object +- Providing `flags` interface and overrides + +## Proposal + +The `ctrl.Manager` _SHOULD_ support loading configurations from `ComponentConfig` like objects. +An interface for that object with getters for the specific configuration parameters is created to bridge existing patterns. + +Without breaking the current `ctrl.NewManager` which uses an exported `ctrl.Options{}` the `manager.go` can expose a new func, `NewFromComponentConfig()` this would be able to loop through the getters to populate an internal `ctrl.Options{}` and pass that into `New()`. + +```golang +//pkg/manager/manager.go + +// ManagerConfiguration defines what the ComponentConfig object for ControllerRuntime needs to support +type ManagerConfiguration interface { + GetSyncPeriod() *time.Duration + + GetLeaderElection() bool + GetLeaderElectionNamespace() string + GetLeaderElectionID() string + + GetLeaseDuration() *time.Duration + GetRenewDeadline() *time.Duration + GetRetryPeriod() *time.Duration + + GetNamespace() string + GetMetricsBindAddress() string + GetHealthProbeBindAddress() string + + GetReadinessEndpointName() string + GetLivenessEndpointName() string + + GetPort() int + GetHost() string + + GetCertDir() string +} + +func NewFromComponentConfig(config *rest.Config, scheme *runtime.Scheme, filename string, managerconfig ManagerConfiguration) (Manager, error) { + codecs := serializer.NewCodecFactory(scheme) + if err := decodeComponentConfigFileInto(codecs, filename, managerconfig); err != nil { + + } + options := Options{} + + if scheme != nil { + options.Scheme = scheme + } + + // Loop through getters + if managerconfig.GetLeaderElection() { + options.LeaderElection = managerconfig.GetLeaderElection() + } + // ... 
+
+	return New(config, options)
+}
+```
+
+#### ComponentConfig Load Order
+
+![ComponentConfig Load Order](/designs/images/component-config-load.png)
+
+#### Embeddable ComponentConfig Type
+
+To make this easier for controller authors, `controller-runtime` can expose a `config.ControllerManagerConfiguration` type that can be embedded, similar to the way that `k8s.io/apimachinery/pkg/apis/meta/v1` works for `TypeMeta` and `ObjectMeta`. These could live in `pkg/api/config/v1alpha1/types.go`. See the `DefaultControllerManagerConfiguration` type below for an example implementation.
+
+```golang
+// pkg/api/config/v1alpha1/types.go
+package v1alpha1
+
+import (
+	"time"
+
+	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
+)
+
+// ControllerManagerConfiguration defines the embedded RuntimeConfiguration for controller-runtime clients.
+type ControllerManagerConfiguration struct {
+	Namespace string `json:"namespace,omitempty"`
+
+	SyncPeriod *time.Duration `json:"syncPeriod,omitempty"`
+
+	LeaderElection configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
+
+	MetricsBindAddress string `json:"metricsBindAddress,omitempty"`
+
+	Health ControllerManagerConfigurationHealth `json:"health,omitempty"`
+
+	Port *int `json:"port,omitempty"`
+	Host string `json:"host,omitempty"`
+
+	CertDir string `json:"certDir,omitempty"`
+}
+
+// ControllerManagerConfigurationHealth defines the health configs
+type ControllerManagerConfigurationHealth struct {
+	HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"`
+
+	ReadinessEndpointName string `json:"readinessEndpointName,omitempty"`
+	LivenessEndpointName string `json:"livenessEndpointName,omitempty"`
+}
+```
+
+
+#### Default ComponentConfig Type
+
+To enable `controller-runtime` to have a default `ComponentConfig` struct which can be used instead of requiring each controller or extension to build its own `ComponentConfig` type, we can create a `DefaultControllerManagerConfiguration` type which can exist in `pkg/api/config/v1alpha1/types.go`. This will allow controller authors to use it before needing to implement their own type with additional configs.
+
+```golang
+// pkg/api/config/v1alpha1/types.go
+package v1alpha1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	configv1alpha1 "sigs.k8s.io/controller-runtime/pkg/apis/config/v1alpha1"
+)
+
+// DefaultControllerManagerConfiguration is the Schema for the DefaultControllerManagerConfigurations API
+type DefaultControllerManagerConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	Spec configv1alpha1.ControllerManagerConfiguration `json:"spec,omitempty"`
+}
+```
+
+This would allow a controller author to use this struct with any config that supports the json/yaml structure. For example, a controller author could define their `Kind` as `FoobarControllerManagerConfiguration` and have it defined as the following.
+
+```yaml
+# config.yaml
+apiVersion: config.somedomain.io/v1alpha1
+kind: FoobarControllerManagerConfiguration
+spec:
+  port: 9443
+  metricsBindAddress: ":8080"
+  leaderElection:
+    leaderElect: false
+```
+
+Given the above config and `DefaultControllerManagerConfiguration`, we'd be able to initialize the controller as follows.
+
+
+```golang
+mgr, err := ctrl.NewManagerFromComponentConfig(ctrl.GetConfigOrDie(), scheme, configname, &defaultv1alpha1.DefaultControllerManagerConfiguration{})
+if err != nil {
+	// ...
+
+}
+```
+
+The above example uses `configname`, which is the name of the file to load the configuration from, and uses `scheme` to get the specific serializer, e.g. `serializer.NewCodecFactory(scheme)`. This allows the configuration to be unmarshalled into the `runtime.Object` type and passed into
+`ctrl.NewManagerFromComponentConfig()` as a `ManagerConfiguration` interface.
+
+#### Using Flags w/ ComponentConfig
+
+Since this design still requires setting up the initial `ComponentConfig` type and passing a pointer to `ctrl.NewFromComponentConfig()`, if you want to allow the use of flags your controller can use any of the different flagging interfaces, e.g. [`flag`](https://golang.org/pkg/flag/), [`pflag`](https://pkg.go.dev/github.com/spf13/pflag), [`flagnum`](https://pkg.go.dev/github.com/luci/luci-go/common/flag/flagenum), and set values on the `ComponentConfig` type prior to passing the pointer into `ctrl.NewFromComponentConfig()`. Example below.
+
+```golang
+leaderElect := true
+
+config := &defaultv1alpha1.DefaultControllerManagerConfiguration{
+	Spec: configv1alpha1.ControllerManagerConfiguration{
+		LeaderElection: configv1alpha1.LeaderElectionConfiguration{
+			LeaderElect: &leaderElect,
+		},
+	},
+}
+mgr, err := ctrl.NewManagerFromComponentConfig(ctrl.GetConfigOrDie(), scheme, configname, config)
+if err != nil {
+	// ...
+}
+```
+
+#### Kubebuilder Scaffolding Example
+
+To be expanded on in a separate design _(link once created)_, this will allow controller authors to generate a type that implements the `ManagerConfiguration` interface. The following is a sample of what this could look like:
+
+```golang
+package config
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	configv1alpha1 "sigs.k8s.io/controller-runtime/pkg/apis/config/v1alpha1"
+)
+
+type ControllerNameConfigurationSpec struct {
+	configv1alpha1.ControllerManagerConfiguration `json:",inline"`
+}
+
+type ControllerNameConfiguration struct {
+	metav1.TypeMeta
+
+	Spec ControllerNameConfigurationSpec `json:"spec"`
+}
+```
+
+Usage of this custom `ComponentConfig` type would then require changing the `ctrl.NewFromComponentConfig()` call to use the new struct instead of the `DefaultControllerManagerConfiguration`.
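+
+For example, initializing the manager with the scaffolded type would look roughly like the following (a sketch in line with the earlier examples; `configname` and `scheme` carry the same assumptions as above):
+
+```golang
+ctrlConfig := &config.ControllerNameConfiguration{}
+mgr, err := ctrl.NewManagerFromComponentConfig(ctrl.GetConfigOrDie(), scheme, configname, ctrlConfig)
+if err != nil {
+	// ...
+}
+```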
+ +## User Stories + +### Controller Author with `controller-runtime` and default type + +- Mount `ConfigMap` +- Initialize `ctrl.Manager` with `NewFromComponentConfig` with config name and `DefaultControllerManagerConfiguration` type +- Build custom controller as usual + +### Controller Author with `controller-runtime` and custom type + +- Implement `ComponentConfig` type +- Embed `ControllerManagerConfiguration` type +- Mount `ConfigMap` +- Initialize `ctrl.Manager` with `NewFromComponentConfig` with config name and `ComponentConfig` type +- Build custom controller as usual + +### Controller Author with `kubebuilder` (tbd proposal for `kubebuilder`) + +- Initialize `kubebuilder` project using `--component-config-name=XYZConfiguration` +- Build custom controller as usual + +### Controller User without modifications to config + +_Provided that the controller provides manifests_ + +- Apply the controller to the cluster +- Deploy custom resources + +### Controller User with modifications to config + +- _Following from previous example without changes_ +- Create a new `ConfigMap` for changes +- Modify the `controller-runtime` pod to use the new `ConfigMap` +- Apply the controller to the cluster +- Deploy custom resources + + +## Risks and Mitigations + +- Given that this isn't changing the core Manager initialization for `controller-runtime` it's fairly low risk + +## Alternatives + +* `NewFromComponentConfig()` could load the object from disk based on the file name and hydrate the `ComponentConfig` type. + +## Implementation History + +- [x] 02/19/2020: Proposed idea in an issue or [community meeting] +- [x] 02/24/2020: Proposal submitted to `controller-runtime` +- [x] 03/02/2020: Updated with default `DefaultControllerManagerConfiguration` +- [x] 03/04/2020: Updated with embeddable `RuntimeConfig` +- [x] 03/10/2020: Updated embeddable name to `ControllerManagerConfiguration` + + + +[community meeting]: https://docs.google.com/document/d/1Ih-2cgg1bUrLwLVTB9tADlPcVdgnuMNBGbUl4D-0TIk diff --git a/designs/images/component-config-load.png b/designs/images/component-config-load.png new file mode 100644 index 0000000000..04619779da Binary files /dev/null and b/designs/images/component-config-load.png differ diff --git a/designs/move-cluster-specific-code-out-of-manager.md b/designs/move-cluster-specific-code-out-of-manager.md new file mode 100644 index 0000000000..67b7a419a5 --- /dev/null +++ b/designs/move-cluster-specific-code-out-of-manager.md @@ -0,0 +1,228 @@ +Move cluster-specific code out of the manager +=================== + +## Motivation + +Today, it is already possible to use controller-runtime to build controllers that act on +more than one cluster. However, this is undocumented and not straight-forward, requiring +users to look into the implementation details to figure out how to make this work. 
+
+## Goals
+
+* Provide an easy-to-discover way to build controllers that act on multiple clusters
+* Decouple the management of `Runnables` from the construction of "things that require a kubeconfig"
+* Do not introduce changes for users that build controllers that act on one cluster only
+
+## Non-Goals
+
+## Proposal
+
+Currently, the `./pkg/manager.Manager` has two purposes:
+
+* Handle running controllers/other runnables and managing their lifecycle
+* Set up various things to interact with the Kubernetes cluster,
+  for example a `Client` and a `Cache`
+
+This works very well when building controllers that talk to a single cluster,
+however some use cases require controllers that interact with more than
+one cluster. This multi-cluster use case is very awkward today, because it
+requires constructing one manager per cluster and adding all subsequent
+managers to the first one.
+
+This document proposes to move all cluster-specific code out of the manager
+and into a new package and interface that then gets embedded into the manager.
+This keeps the usage for single-cluster cases the same and introduces the
+change in a backwards-compatible manner.
+
+Furthermore, the manager gets extended to start all caches before any other
+`runnables` are started.
+
+
+The new `Cluster` interface will look like this:
+
+```go
+type Cluster interface {
+	// SetFields will set cluster-specific dependencies on an object for which the object has implemented the inject
+	// interface, specifically inject.Client, inject.Cache, inject.Scheme, inject.Config and inject.APIReader
+	SetFields(interface{}) error
+
+	// GetConfig returns an initialized Config
+	GetConfig() *rest.Config
+
+	// GetClient returns a client configured with the Config. This client may
+	// not be a fully "direct" client -- it may read from a cache, for
+	// instance. See Options.NewClient for more information on how the default
+	// implementation works.
+	GetClient() client.Client
+
+	// GetFieldIndexer returns a client.FieldIndexer configured with the client
+	GetFieldIndexer() client.FieldIndexer
+
+	// GetCache returns a cache.Cache
+	GetCache() cache.Cache
+
+	// GetEventRecorderFor returns a new EventRecorder for the provided name
+	GetEventRecorderFor(name string) record.EventRecorder
+
+	// GetRESTMapper returns a RESTMapper
+	GetRESTMapper() meta.RESTMapper
+
+	// GetAPIReader returns a reader that will be configured to use the API server.
+	// This should be used sparingly and only when the client does not fit your
+	// use case.
+	GetAPIReader() client.Reader
+
+	// GetScheme returns an initialized Scheme
+	GetScheme() *runtime.Scheme
+
+	// Start starts the connection to the Cluster
+	Start(<-chan struct{}) error
+}
+```
+
+And the current `Manager` interface will change to look like this:
+
+```go
+type Manager interface {
+	// Cluster holds objects to connect to a cluster
+	cluster.Cluster
+
+	// Add will set requested dependencies on the component, and cause the component to be
+	// started when Start is called. Add will inject any dependencies for which the argument
+	// implements the inject interface - e.g. inject.Client.
+	// Depending on whether a Runnable implements the LeaderElectionRunnable interface, a Runnable can be run in either
+	// non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
+
+	Add(Runnable) error
+
+	// Elected is closed when this manager is elected leader of a group of
+	// managers, either because it won a leader election or because no leader
+	// election was configured.
+	Elected() <-chan struct{}
+
+	// SetFields will set any dependencies on an object for which the object has implemented the inject
+	// interface - e.g. inject.Client.
+	SetFields(interface{}) error
+
+	// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
+	// Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints are meant to be
+	// sensitive and shouldn't be exposed publicly.
+	// If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as
+	// a Runnable to the manager via the Add method.
+	AddMetricsExtraHandler(path string, handler http.Handler) error
+
+	// AddHealthzCheck allows you to add a Healthz checker
+	AddHealthzCheck(name string, check healthz.Checker) error
+
+	// AddReadyzCheck allows you to add a Readyz checker
+	AddReadyzCheck(name string, check healthz.Checker) error
+
+	// Start starts all registered Controllers and blocks until the Stop channel is closed.
+	// Returns an error if there is an error starting any controller.
+	// If LeaderElection is used, the binary must be exited immediately after this returns,
+	// otherwise components that need leader election might continue to run after the leader
+	// lock was lost.
+	Start(<-chan struct{}) error
+
+	// GetWebhookServer returns a webhook.Server
+	GetWebhookServer() *webhook.Server
+}
+```
+
+Furthermore, during startup, the `Manager` will use type assertion to find `Cluster`s
+to be able to start their caches before anything else:
+
+```go
+type HasCaches interface {
+	GetCache() cache.Cache
+}
+if getter, hasCaches := runnable.(HasCaches); hasCaches {
+	m.caches = append(m.caches, getter.GetCache())
+}
+```
+
+```go
+for idx := range cm.caches {
+	go func(idx int) { cm.caches[idx].Start(cm.internalStop) }(idx)
+}
+
+for _, cache := range cm.caches {
+	cache.WaitForCacheSync(cm.internalStop)
+}
+
+// Start all other runnables
+```
+
+## Example
+
+Below is a sample `reconciler` that will create a secret in a `mirrorCluster` for each
+secret found in `referenceCluster` if none with that name already exists. To keep the sample
+short, it won't compare the contents of the secrets.
+
+```go
+type secretMirrorReconciler struct {
+	referenceClusterClient, mirrorClusterClient client.Client
+}
+
+func (r *secretMirrorReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+	s := &corev1.Secret{}
+	if err := r.referenceClusterClient.Get(context.TODO(), req.NamespacedName, s); err != nil {
+		if kerrors.IsNotFound(err) { return reconcile.Result{}, nil }
+		return reconcile.Result{}, err
+	}
+
+	if err := r.mirrorClusterClient.Get(context.TODO(), req.NamespacedName, &corev1.Secret{}); err != nil {
+		if !kerrors.IsNotFound(err) {
+			return reconcile.Result{}, err
+		}
+
+		mirrorSecret := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{Namespace: s.Namespace, Name: s.Name},
+			Data: s.Data,
+		}
+		return reconcile.Result{}, r.mirrorClusterClient.Create(context.TODO(), mirrorSecret)
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func NewSecretMirrorReconciler(mgr manager.Manager, mirrorCluster cluster.Cluster) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		// Watch Secrets in the reference cluster
+		For(&corev1.Secret{}).
+
+		// Watch Secrets in the mirror cluster
+		Watches(
+			source.NewKindWithCache(&corev1.Secret{}, mirrorCluster.GetCache()),
+			&handler.EnqueueRequestForObject{},
+		).
+		Complete(&secretMirrorReconciler{
+			referenceClusterClient: mgr.GetClient(),
+			mirrorClusterClient:    mirrorCluster.GetClient(),
+		})
+}
+
+func main() {
+
+	mgr, err := manager.New(cfg1, manager.Options{})
+	if err != nil {
+		panic(err)
+	}
+
+	mirrorCluster, err := cluster.New(cfg2)
+	if err != nil {
+		panic(err)
+	}
+
+	if err := mgr.Add(mirrorCluster); err != nil {
+		panic(err)
+	}
+
+	if err := NewSecretMirrorReconciler(mgr, mirrorCluster); err != nil {
+		panic(err)
+	}
+
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		panic(err)
+	}
+}
+```
diff --git a/designs/template.md b/designs/template.md
new file mode 100644
index 0000000000..c04d6e0c71
--- /dev/null
+++ b/designs/template.md
@@ -0,0 +1,21 @@
+Title of the Design
+===================
+
+
+
+## Example
+
+
\ No newline at end of file
diff --git a/designs/use-selectors-at-cache.md b/designs/use-selectors-at-cache.md
new file mode 100644
index 0000000000..1d7ec6ecfb
--- /dev/null
+++ b/designs/use-selectors-at-cache.md
@@ -0,0 +1,122 @@
+# Filter cache ListWatch using selectors
+
+## Motivation
+
+Controller-Runtime controllers use a cache to subscribe to events from
+Kubernetes objects and to read those objects more efficiently by avoiding
+calls out to the API. This cache is backed by Kubernetes informers.
+
+The only way to filter this cache is by namespace and resource type.
+In cases where a controller is only interested in a small subset of objects
+(for example all pods on a node), this might end up not being efficient enough.
+
+Requests to a client backed by a filtered cache for objects that do not match
+the filter will never return anything, so we need to make sure that we properly
+warn users to only use this when they are sure they know what they are doing.
+
+This proposal sidesteps the issue of "How do we plug this into the cache-backed
+client so that users get feedback when they request something that is
+not matching the cache's filter?" by only implementing the filter logic in the
+cache package. This allows advanced users to combine a filtered cache with the
+already existing `NewCacheFunc` option in the manager and cluster packages,
+while simultaneously hiding it from newer users that might not be aware of the
+implications and the associated foot-shoot potential.
+
+The only alternative today to get a filtered cache with controller-runtime is
+to build it out-of-tree. Because such a cache would mostly copy the existing
+cache and add just some options, this is not great for consumers.
+
+This proposal is related to the following issue [2].
+
+## Proposal
+
+Add new selector code at `pkg/cache/internal/selector.go` with common structs
+and helpers:
+
+```golang
+package internal
+
+...
+
+// SelectorsByObject associates a client.Object with a field/label selector
+type SelectorsByObject map[client.Object]Selector
+
+// SelectorsByGVK associates a GroupVersionKind with a field/label selector
+type SelectorsByGVK map[schema.GroupVersionKind]Selector
+
+// Selector specifies the label/field selector to fill in ListOptions
+type Selector struct {
+	Label labels.Selector
+	Field fields.Selector
+}
+
+// ApplyToList fills in ListOptions LabelSelector and FieldSelector if needed
+func (s Selector) ApplyToList(listOpts *metav1.ListOptions) {
+...
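+	// A minimal sketch of the body (an assumption, not the final implementation):
+	// only set the selector strings when a selector was actually provided.
+	if s.Label != nil {
+		listOpts.LabelSelector = s.Label.String()
+	}
+	if s.Field != nil {
+		listOpts.FieldSelector = s.Field.String()
+	}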
+
+}
+```
+
+Add a type alias in `pkg/cache/cache.go` to the internal type:
+
+```golang
+type SelectorsByObject internal.SelectorsByObject
+```
+
+Extend `cache.Options` as follows:
+
+```golang
+type Options struct {
+	Scheme *runtime.Scheme
+	Mapper meta.RESTMapper
+	Resync *time.Duration
+	Namespace string
+	SelectorsByObject SelectorsByObject
+}
+```
+
+Add a new builder function that returns a cache constructor using the passed
+cache.Options; users can set SelectorsByObject there to filter the cache, and it
+will convert SelectorsByObject to SelectorsByGVK,
+
+```golang
+func BuilderWithOptions(options cache.Options) NewCacheFunc {
+...
+}
+```
+
+which is passed to the informer's ListWatch, and add the filtering option:
+
+```golang
+
+// At pkg/cache/internal/informers_map.go
+
+ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+	ip.selectors[gvk].ApplyToList(&opts)
+...
+```
+
+Here is a PR with the implementation of the `pkg/cache` part [3].
+
+## Example
+
+Users will override the `NewCache` function to make it clear that they know exactly the
+implications of using a different cache than the default one:
+
+```golang
+  ctrl.Options.NewCache = cache.BuilderWithOptions(cache.Options{
+    SelectorsByObject: cache.SelectorsByObject{
+      &corev1.Node{}: {
+        Field: fields.SelectorFromSet(fields.Set{"metadata.name": "node01"}),
+      },
+      &v1beta1.NodeNetworkState{}: {
+        Field: fields.SelectorFromSet(fields.Set{"metadata.name": "node01"}),
+        Label: labels.SelectorFromSet(labels.Set{"app": "kubernetes-nmstate"}),
+      },
+    },
+  )
+```
+
+[1] https://github.com/nmstate/kubernetes-nmstate/pull/687
+[2] https://github.com/kubernetes-sigs/controller-runtime/issues/244
+[3] https://github.com/kubernetes-sigs/controller-runtime/pull/1404
diff --git a/doc.go b/doc.go
new file mode 100644
index 0000000000..fa6c532c49
--- /dev/null
+++ b/doc.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package controllerruntime provides tools to construct Kubernetes-style
+// controllers that manipulate both Kubernetes CRDs and aggregated/built-in
+// Kubernetes APIs.
+//
+// It defines easy helpers for the common use cases when building CRDs, built
+// on top of customizable layers of abstraction. Common cases should be easy,
+// and uncommon cases should be possible. In general, controller-runtime tries
+// to guide users towards Kubernetes controller best-practices.
+//
+// # Getting Started
+//
+// The main entrypoint for controller-runtime is this root package, which
+// contains all of the common types needed to get started building controllers:
+//
+//	import (
+//		ctrl "sigs.k8s.io/controller-runtime"
+//	)
+//
+// The examples in this package walk through a basic controller setup. The
+// kubebuilder book (https://book.kubebuilder.io) has some more in-depth
+// walkthroughs.
+//
+// controller-runtime favors structs with sane defaults over constructors, so
+// it's fairly common to see structs being used directly in controller-runtime.
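+//
+// For instance, a manager is typically constructed by filling in an Options
+// struct directly (a minimal sketch; see the package examples for a more
+// complete walkthrough):
+//
+//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
+//	if err != nil {
+//		// handle the error
+//	}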
+// +// # Organization +// +// A brief-ish walkthrough of the layout of this library can be found below. Each +// package contains more information about how to use it. +// +// Frequently asked questions about using controller-runtime and designing +// controllers can be found at +// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// +// # Managers +// +// Every controller and webhook is ultimately run by a Manager (pkg/manager). A +// manager is responsible for running controllers and webhooks, and setting up +// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// well as managing leader election (pkg/leaderelection). Managers are +// generally configured to gracefully shut down controllers on pod termination +// by wiring up a signal handler (pkg/manager/signals). +// +// # Controllers +// +// Controllers (pkg/controller) use events (pkg/event) to eventually trigger +// reconcile requests. They may be constructed manually, but are often +// constructed with a Builder (pkg/builder), which eases the wiring of event +// sources (pkg/source), like Kubernetes API object changes, to event handlers +// (pkg/handler), like "enqueue a reconcile request for the object owner". +// Predicates (pkg/predicate) can be used to filter which events actually +// trigger reconciles. There are pre-written utilities for the common cases, and +// interfaces and helpers for advanced cases. +// +// # Reconcilers +// +// Controller logic is implemented in terms of Reconcilers (pkg/reconcile). A +// Reconciler implements a function which takes a reconcile Request containing +// the name and namespace of the object to reconcile, reconciles the object, +// and returns a Response or an error indicating whether to requeue for a +// second round of processing. +// +// # Clients and Caches +// +// Reconcilers use Clients (pkg/client) to access API objects. The default +// client provided by the manager reads from a local shared cache (pkg/cache) +// and writes directly to the API server, but clients can be constructed that +// only talk to the API server, without a cache. The Cache will auto-populate +// with watched objects, as well as when other structured objects are +// requested. The default split client does not promise to invalidate the cache +// during writes (nor does it promise sequential create/get coherence), and code +// should not assume a get immediately following a create/update will return +// the updated resource. Caches may also have indexes, which can be created via +// a FieldIndexer (pkg/client) obtained from the manager. Indexes can used to +// quickly and easily look up all objects with certain fields set. Reconcilers +// may retrieve event recorders (pkg/recorder) to emit events using the +// manager. +// +// # Schemes +// +// Clients, Caches, and many other things in Kubernetes use Schemes +// (pkg/scheme) to associate Go types to Kubernetes API Kinds +// (Group-Version-Kinds, to be specific). +// +// # Webhooks +// +// Similarly, webhooks (pkg/webhook/admission) may be implemented directly, but +// are often constructed using a builder (pkg/webhook/admission/builder). They +// are run via a server (pkg/webhook) which is managed by a Manager. +// +// # Logging and Metrics +// +// Logging (pkg/log) in controller-runtime is done via structured logs, using a +// log set of interfaces called logr +// (https://pkg.go.dev/github.com/go-logr/logr). 
While controller-runtime +// provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap), +// you can provide any implementation of logr as the base logger for +// controller-runtime. +// +// Metrics (pkg/metrics) provided by controller-runtime are registered into a +// controller-runtime-specific Prometheus metrics registry. The manager can +// serve these by an HTTP endpoint, and additional metrics may be registered to +// this Registry as normal. +// +// # Testing +// +// You can easily build integration and unit tests for your controllers and +// webhooks using the test Environment (pkg/envtest). This will automatically +// stand up a copy of etcd and kube-apiserver, and provide the correct options +// to connect to the API server. It's designed to work well with the Ginkgo +// testing framework, but should work with any testing setup. +package controllerruntime diff --git a/example_test.go b/example_test.go new file mode 100644 index 0000000000..beee06215a --- /dev/null +++ b/example_test.go @@ -0,0 +1,144 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerruntime_test + +import ( + "context" + "fmt" + "os" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// This example creates a simple application Controller that is configured for ReplicaSets and Pods. +// +// * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into +// ReplicaSetReconciler. +// +// * Start the application. +// TODO(pwittrock): Update this example when we have better dependency injection support. +func Example() { + var log = ctrl.Log.WithName("builder-examples") + + manager, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + err = ctrl. + NewControllerManagedBy(manager). // Create the Controller + For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API + Owns(&corev1.Pod{}). // ReplicaSet owns Pods created by it + Complete(&ReplicaSetReconciler{Client: manager.GetClient()}) + if err != nil { + log.Error(err, "could not create controller") + os.Exit(1) + } + + if err := manager.Start(ctrl.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} + +// This example creates a simple application Controller that is configured for ReplicaSets and Pods. +// This application controller will be running leader election with the provided configuration in the manager options. +// If leader election configuration is not provided, controller runs leader election with default values. 
+// Default values taken from: https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go +// * defaultLeaseDuration = 15 * time.Second +// * defaultRenewDeadline = 10 * time.Second +// * defaultRetryPeriod = 2 * time.Second +// +// * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into +// ReplicaSetReconciler. +// +// * Start the application. +// TODO(pwittrock): Update this example when we have better dependency injection support. +func Example_updateLeaderElectionDurations() { + var log = ctrl.Log.WithName("builder-examples") + leaseDuration := 100 * time.Second + renewDeadline := 80 * time.Second + retryPeriod := 20 * time.Second + manager, err := ctrl.NewManager( + ctrl.GetConfigOrDie(), + ctrl.Options{ + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + }) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + err = ctrl. + NewControllerManagedBy(manager). // Create the Controller + For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API + Owns(&corev1.Pod{}). // ReplicaSet owns Pods created by it + Complete(&ReplicaSetReconciler{Client: manager.GetClient()}) + if err != nil { + log.Error(err, "could not create controller") + os.Exit(1) + } + + if err := manager.Start(ctrl.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} + +// ReplicaSetReconciler is a simple Controller example implementation. +type ReplicaSetReconciler struct { + client.Client +} + +// Implement the business logic: +// This function will be called when there is a change to a ReplicaSet or a Pod with an OwnerReference +// to a ReplicaSet. +// +// * Read the ReplicaSet +// * Read the Pods +// * Set a Label on the ReplicaSet with the Pod count. +func (a *ReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Read the ReplicaSet + rs := &appsv1.ReplicaSet{} + err := a.Get(ctx, req.NamespacedName, rs) + if err != nil { + return ctrl.Result{}, err + } + + // List the Pods matching the PodTemplate Labels + pods := &corev1.PodList{} + err = a.List(ctx, pods, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) + if err != nil { + return ctrl.Result{}, err + } + + // Update the ReplicaSet + rs.Labels["pod-count"] = fmt.Sprintf("%v", len(pods.Items)) + err = a.Update(context.TODO(), rs) + if err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000..2110ae214e --- /dev/null +++ b/examples/README.md @@ -0,0 +1,36 @@ +# Examples + +These two examples represent the usage of `controller-runtime` libraries for built-in Kubernetes resources as well as custom resources. + +### builtins/ + +This example implements a custom controller and webhooks for the *existing* ReplicaSet resource. + +* `controller.go`: implements a reconciler for ReplicaSets +* `mutatingwebhook.go`: implements a mutating webhook that adds an annotation to every incoming Pod ("example-mutating-admission-webhook" = "foo") +* `validatingwebhook.go`: implements a validating webhook that checks to see if a Pod has the aforementioned annotation +* `main.go` + 1. Creates a new manager + 2. Creates a new controller that watches both ReplicaSets and Pods and reconciles the objects with the implemented reconciler + 3. Registers the mutating and validating webhooks with the manager + 4. 
Starts the manager + +### crd/ + +This example implements a *new* Kubernetes resource, ChaosPod, and creates a custom controller that watches it and webhooks that mutate and validate. + +* `pkg/` + * `resource.go`: defines the schema for the ChaosPod API and implements validate and mutate webhooks + * `groupversion_info.go`: specifies the Group and Version for the ChaosPod API + * `zz_generated.deepcopy.go`: deep copy functions generated by kubebuilder +* `main.go` + 1. Creates a new manager + 2. Adds ChaosPod resource to the manager's schema + 3. Implements a reconciler to execute the desired behavior of the ChaosPod API + 4. Creates a new controller that watches ChaosPods and reconciles the objects with the implemented reconciler + 5. Adds ChaosPod webhooks to manager + 6. Starts the manager + +## Deploying and Running + +To install and run the provided examples, see the Kubebuilder [Quick Start](https://book.kubebuilder.io/quick-start.html). \ No newline at end of file diff --git a/examples/builtins/controller.go b/examples/builtins/controller.go new file mode 100644 index 0000000000..6c8c5d935f --- /dev/null +++ b/examples/builtins/controller.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// reconcileReplicaSet reconciles ReplicaSets +type reconcileReplicaSet struct { + // client can be used to retrieve objects from the APIServer. 
+ client client.Client +} + +// Implement reconcile.Reconciler so the controller can reconcile objects +var _ reconcile.Reconciler = &reconcileReplicaSet{} + +func (r *reconcileReplicaSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + // set up a convenient log object so we don't have to type request over and over again + log := log.FromContext(ctx) + + // Fetch the ReplicaSet from the cache + rs := &appsv1.ReplicaSet{} + err := r.client.Get(ctx, request.NamespacedName, rs) + if errors.IsNotFound(err) { + log.Error(nil, "Could not find ReplicaSet") + return reconcile.Result{}, nil + } + + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not fetch ReplicaSet: %+v", err) + } + + // Print the ReplicaSet + log.Info("Reconciling ReplicaSet", "container name", rs.Spec.Template.Spec.Containers[0].Name) + + // Set the label if it is missing + if rs.Labels == nil { + rs.Labels = map[string]string{} + } + if rs.Labels["hello"] == "world" { + return reconcile.Result{}, nil + } + + // Update the ReplicaSet + rs.Labels["hello"] = "world" + err = r.client.Update(ctx, rs) + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not write ReplicaSet: %+v", err) + } + + return reconcile.Result{}, nil +} diff --git a/examples/builtins/main.go b/examples/builtins/main.go new file mode 100644 index 0000000000..ff1f0dfa3b --- /dev/null +++ b/examples/builtins/main.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "os" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/source" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +func init() { + log.SetLogger(zap.New()) +} + +func main() { + entryLog := log.Log.WithName("entrypoint") + + // Setup a Manager + entryLog.Info("setting up manager") + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + entryLog.Error(err, "unable to set up overall controller manager") + os.Exit(1) + } + + // Setup a new controller to reconcile ReplicaSets + entryLog.Info("Setting up controller") + c, err := controller.New("foo-controller", mgr, controller.Options{ + Reconciler: &reconcileReplicaSet{client: mgr.GetClient()}, + }) + if err != nil { + entryLog.Error(err, "unable to set up individual controller") + os.Exit(1) + } + + // Watch ReplicaSets and enqueue ReplicaSet object key + if err := c.Watch(&source.Kind{Type: &appsv1.ReplicaSet{}}, &handler.EnqueueRequestForObject{}); err != nil { + entryLog.Error(err, "unable to watch ReplicaSets") + os.Exit(1) + } + + // Watch Pods and enqueue owning ReplicaSet key + if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, + &handler.EnqueueRequestForOwner{OwnerType: &appsv1.ReplicaSet{}, IsController: true}); err != nil { + entryLog.Error(err, "unable to watch Pods") + os.Exit(1) + } + + // Setup webhooks + entryLog.Info("setting up webhook server") + hookServer := mgr.GetWebhookServer() + + entryLog.Info("registering webhooks to the webhook server") + hookServer.Register("/mutate-v1-pod", &webhook.Admission{Handler: &podAnnotator{Client: mgr.GetClient()}}) + hookServer.Register("/validate-v1-pod", &webhook.Admission{Handler: &podValidator{Client: mgr.GetClient()}}) + + entryLog.Info("starting manager") + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + entryLog.Error(err, "unable to run manager") + os.Exit(1) + } +} diff --git a/examples/builtins/mutatingwebhook.go b/examples/builtins/mutatingwebhook.go new file mode 100644 index 0000000000..a4f4eee508 --- /dev/null +++ b/examples/builtins/mutatingwebhook.go @@ -0,0 +1,66 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "encoding/json" + "net/http" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// +kubebuilder:webhook:path=/mutate-v1-pod,mutating=true,failurePolicy=fail,groups="",resources=pods,verbs=create;update,versions=v1,name=mpod.kb.io + +// podAnnotator annotates Pods +type podAnnotator struct { + Client client.Client + decoder *admission.Decoder +} + +// podAnnotator adds an annotation to every incoming pods. +func (a *podAnnotator) Handle(ctx context.Context, req admission.Request) admission.Response { + pod := &corev1.Pod{} + + err := a.decoder.Decode(req, pod) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + if pod.Annotations == nil { + pod.Annotations = map[string]string{} + } + pod.Annotations["example-mutating-admission-webhook"] = "foo" + + marshaledPod, err := json.Marshal(pod) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + + return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod) +} + +// podAnnotator implements admission.DecoderInjector. +// A decoder will be automatically injected. + +// InjectDecoder injects the decoder. +func (a *podAnnotator) InjectDecoder(d *admission.Decoder) error { + a.decoder = d + return nil +} diff --git a/examples/builtins/validatingwebhook.go b/examples/builtins/validatingwebhook.go new file mode 100644 index 0000000000..57bc526574 --- /dev/null +++ b/examples/builtins/validatingwebhook.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + "net/http" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// +kubebuilder:webhook:path=/validate-v1-pod,mutating=false,failurePolicy=fail,groups="",resources=pods,verbs=create;update,versions=v1,name=vpod.kb.io + +// podValidator validates Pods +type podValidator struct { + Client client.Client + decoder *admission.Decoder +} + +// podValidator admits a pod if a specific annotation exists. +func (v *podValidator) Handle(ctx context.Context, req admission.Request) admission.Response { + pod := &corev1.Pod{} + + err := v.decoder.Decode(req, pod) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + key := "example-mutating-admission-webhook" + anno, found := pod.Annotations[key] + if !found { + return admission.Denied(fmt.Sprintf("missing annotation %s", key)) + } + if anno != "foo" { + return admission.Denied(fmt.Sprintf("annotation %s did not have value %q", key, "foo")) + } + + return admission.Allowed("") +} + +// podValidator implements admission.DecoderInjector. +// A decoder will be automatically injected. + +// InjectDecoder injects the decoder. 
+func (v *podValidator) InjectDecoder(d *admission.Decoder) error { + v.decoder = d + return nil +} diff --git a/examples/configfile/builtin/config.yaml b/examples/configfile/builtin/config.yaml new file mode 100644 index 0000000000..39ac86ce60 --- /dev/null +++ b/examples/configfile/builtin/config.yaml @@ -0,0 +1,7 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: ControllerManagerConfiguration +cacheNamespace: default +metrics: + bindAddress: :9091 +leaderElection: + leaderElect: false diff --git a/examples/configfile/builtin/controller.go b/examples/configfile/builtin/controller.go new file mode 100644 index 0000000000..8349bcd5aa --- /dev/null +++ b/examples/configfile/builtin/controller.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// reconcileReplicaSet reconciles ReplicaSets +type reconcileReplicaSet struct { + // client can be used to retrieve objects from the APIServer. + client client.Client +} + +// Implement reconcile.Reconciler so the controller can reconcile objects +var _ reconcile.Reconciler = &reconcileReplicaSet{} + +func (r *reconcileReplicaSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + // set up a convenient log object so we don't have to type request over and over again + log := log.FromContext(ctx) + + // Fetch the ReplicaSet from the cache + rs := &appsv1.ReplicaSet{} + err := r.client.Get(context.TODO(), request.NamespacedName, rs) + if errors.IsNotFound(err) { + log.Error(nil, "Could not find ReplicaSet") + return reconcile.Result{}, nil + } + + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not fetch ReplicaSet: %+v", err) + } + + // Print the ReplicaSet + log.Info("Reconciling ReplicaSet", "container name", rs.Spec.Template.Spec.Containers[0].Name) + + // Set the label if it is missing + if rs.Labels == nil { + rs.Labels = map[string]string{} + } + if rs.Labels["hello"] == "world" { + return reconcile.Result{}, nil + } + + // Update the ReplicaSet + rs.Labels["hello"] = "world" + err = r.client.Update(context.TODO(), rs) + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not write ReplicaSet: %+v", err) + } + + return reconcile.Result{}, nil +} diff --git a/examples/configfile/builtin/main.go b/examples/configfile/builtin/main.go new file mode 100644 index 0000000000..abd6180d19 --- /dev/null +++ b/examples/configfile/builtin/main.go @@ -0,0 +1,72 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/config" + cfg "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" +) + +var scheme = runtime.NewScheme() + +func init() { + log.SetLogger(zap.New()) + clientgoscheme.AddToScheme(scheme) +} + +func main() { + entryLog := log.Log.WithName("entrypoint") + + // Setup a Manager + entryLog.Info("setting up manager") + mgr, err := ctrl.NewManager(config.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + }.AndFromOrDie(cfg.File())) + if err != nil { + entryLog.Error(err, "unable to set up overall controller manager") + os.Exit(1) + } + + // Setup a new controller to reconcile ReplicaSets + err = ctrl.NewControllerManagedBy(mgr). + For(&appsv1.ReplicaSet{}). + Owns(&corev1.Pod{}). + Complete(&reconcileReplicaSet{ + client: mgr.GetClient(), + }) + if err != nil { + entryLog.Error(err, "unable to create controller") + os.Exit(1) + } + + entryLog.Info("starting manager") + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + entryLog.Error(err, "unable to run manager") + os.Exit(1) + } +} diff --git a/examples/configfile/custom/config.yaml b/examples/configfile/custom/config.yaml new file mode 100644 index 0000000000..bf9ac044b4 --- /dev/null +++ b/examples/configfile/custom/config.yaml @@ -0,0 +1,8 @@ +apiVersion: examples.x-k8s.io/v1alpha1 +kind: CustomControllerManagerConfiguration +clusterName: example-test +cacheNamespace: default +metrics: + bindAddress: :8081 +leaderElection: + leaderElect: false diff --git a/examples/configfile/custom/controller.go b/examples/configfile/custom/controller.go new file mode 100644 index 0000000000..8349bcd5aa --- /dev/null +++ b/examples/configfile/custom/controller.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// reconcileReplicaSet reconciles ReplicaSets +type reconcileReplicaSet struct { + // client can be used to retrieve objects from the APIServer. 
+ client client.Client +} + +// Implement reconcile.Reconciler so the controller can reconcile objects +var _ reconcile.Reconciler = &reconcileReplicaSet{} + +func (r *reconcileReplicaSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + // set up a convenient log object so we don't have to type request over and over again + log := log.FromContext(ctx) + + // Fetch the ReplicaSet from the cache + rs := &appsv1.ReplicaSet{} + err := r.client.Get(context.TODO(), request.NamespacedName, rs) + if errors.IsNotFound(err) { + log.Error(nil, "Could not find ReplicaSet") + return reconcile.Result{}, nil + } + + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not fetch ReplicaSet: %+v", err) + } + + // Print the ReplicaSet + log.Info("Reconciling ReplicaSet", "container name", rs.Spec.Template.Spec.Containers[0].Name) + + // Set the label if it is missing + if rs.Labels == nil { + rs.Labels = map[string]string{} + } + if rs.Labels["hello"] == "world" { + return reconcile.Result{}, nil + } + + // Update the ReplicaSet + rs.Labels["hello"] = "world" + err = r.client.Update(context.TODO(), rs) + if err != nil { + return reconcile.Result{}, fmt.Errorf("could not write ReplicaSet: %+v", err) + } + + return reconcile.Result{}, nil +} diff --git a/examples/configfile/custom/main.go b/examples/configfile/custom/main.go new file mode 100644 index 0000000000..e0fc95e337 --- /dev/null +++ b/examples/configfile/custom/main.go @@ -0,0 +1,78 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/examples/configfile/custom/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client/config" + cfg "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" +) + +var scheme = runtime.NewScheme() + +func init() { + log.SetLogger(zap.New()) + clientgoscheme.AddToScheme(scheme) + v1alpha1.AddToScheme(scheme) +} + +func main() { + entryLog := log.Log.WithName("entrypoint") + + // Setup a Manager + entryLog.Info("setting up manager") + ctrlConfig := v1alpha1.CustomControllerManagerConfiguration{} + + mgr, err := ctrl.NewManager(config.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + }.AndFromOrDie(cfg.File().OfKind(&ctrlConfig))) + if err != nil { + entryLog.Error(err, "unable to set up overall controller manager") + os.Exit(1) + } + + entryLog.Info("setting up cluster", "name", ctrlConfig.ClusterName) + + // Watch ReplicaSets and enqueue ReplicaSet object key + err = ctrl.NewControllerManagedBy(mgr). + For(&appsv1.ReplicaSet{}). + Owns(&corev1.Pod{}). 
+ Complete(&reconcileReplicaSet{ + client: mgr.GetClient(), + }) + if err != nil { + entryLog.Error(err, "unable to create controller") + os.Exit(1) + } + + entryLog.Info("starting manager") + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + entryLog.Error(err, "unable to run manager") + os.Exit(1) + } +} diff --git a/examples/configfile/custom/v1alpha1/types.go b/examples/configfile/custom/v1alpha1/types.go new file mode 100644 index 0000000000..79e8422c5c --- /dev/null +++ b/examples/configfile/custom/v1alpha1/types.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 provides the CustomControllerManagerConfiguration used for +// demoing componentconfig +// +kubebuilder:object:generate=true +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + cfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "examples.x-k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// +kubebuilder:object:root=true + +// CustomControllerManagerConfiguration is the Schema for the CustomControllerManagerConfigurations API +type CustomControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // ControllerManagerConfigurationSpec returns the contfigurations for controllers + cfg.ControllerManagerConfigurationSpec `json:",inline"` + + ClusterName string `json:"clusterName,omitempty"` +} + +func init() { + SchemeBuilder.Register(&CustomControllerManagerConfiguration{}) +} diff --git a/examples/configfile/custom/v1alpha1/zz_generated.deepcopy.go b/examples/configfile/custom/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b9d3b6b4b9 --- /dev/null +++ b/examples/configfile/custom/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,35 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomControllerManagerConfiguration) DeepCopyInto(out *CustomControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomControllerManagerConfiguration. 
+func (in *CustomControllerManagerConfiguration) DeepCopy() *CustomControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(CustomControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/examples/crd/main.go b/examples/crd/main.go new file mode 100644 index 0000000000..1f6cd5fac2 --- /dev/null +++ b/examples/crd/main.go @@ -0,0 +1,145 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "math/rand" + "os" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + api "sigs.k8s.io/controller-runtime/examples/crd/pkg" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + setupLog = ctrl.Log.WithName("setup") +) + +type reconciler struct { + client.Client + scheme *runtime.Scheme +} + +func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.FromContext(ctx).WithValues("chaospod", req.NamespacedName) + log.V(1).Info("reconciling chaos pod") + + var chaospod api.ChaosPod + if err := r.Get(ctx, req.NamespacedName, &chaospod); err != nil { + log.Error(err, "unable to get chaosctl") + return ctrl.Result{}, err + } + + var pod corev1.Pod + podFound := true + if err := r.Get(ctx, req.NamespacedName, &pod); err != nil { + if !apierrors.IsNotFound(err) { + log.Error(err, "unable to get pod") + return ctrl.Result{}, err + } + podFound = false + } + + if podFound { + shouldStop := chaospod.Spec.NextStop.Time.Before(time.Now()) + if !shouldStop { + return ctrl.Result{RequeueAfter: chaospod.Spec.NextStop.Sub(time.Now()) + 1*time.Second}, nil + } + + if err := r.Delete(ctx, &pod); err != nil { + log.Error(err, "unable to delete pod") + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: true}, nil + } + + templ := chaospod.Spec.Template.DeepCopy() + pod.ObjectMeta = templ.ObjectMeta + pod.Name = req.Name + pod.Namespace = req.Namespace + pod.Spec = templ.Spec + + if err := ctrl.SetControllerReference(&chaospod, &pod, r.scheme); err != nil { + log.Error(err, "unable to set pod's owner reference") + return ctrl.Result{}, err + } + + if err := r.Create(ctx, &pod); err != nil { + log.Error(err, "unable to create pod") + return ctrl.Result{}, err + } + + chaospod.Spec.NextStop.Time = time.Now().Add(time.Duration(10*(rand.Int63n(2)+1)) * time.Second) + chaospod.Status.LastRun = pod.CreationTimestamp + if err := r.Update(ctx, &chaospod); err != nil { + log.Error(err, "unable to update chaosctl status") + return ctrl.Result{}, err + } 
+ return ctrl.Result{}, nil +} + +func main() { + ctrl.SetLogger(zap.New()) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // in a real controller, we'd create a new scheme for this + err = api.AddToScheme(mgr.GetScheme()) + if err != nil { + setupLog.Error(err, "unable to add scheme") + os.Exit(1) + } + + err = ctrl.NewControllerManagedBy(mgr). + For(&api.ChaosPod{}). + Owns(&corev1.Pod{}). + Complete(&reconciler{ + Client: mgr.GetClient(), + scheme: mgr.GetScheme(), + }) + if err != nil { + setupLog.Error(err, "unable to create controller") + os.Exit(1) + } + + err = ctrl.NewWebhookManagedBy(mgr). + For(&api.ChaosPod{}). + Complete() + if err != nil { + setupLog.Error(err, "unable to create webhook") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/examples/crd/pkg/groupversion_info.go b/examples/crd/pkg/groupversion_info.go new file mode 100644 index 0000000000..04953dd939 --- /dev/null +++ b/examples/crd/pkg/groupversion_info.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +kubebuilder:object:generate=true +// +groupName=chaosapps.metamagical.io +package pkg + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + log = logf.Log.WithName("chaospod-resource") + + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "chaosapps.metamagical.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/examples/crd/pkg/resource.go b/examples/crd/pkg/resource.go new file mode 100644 index 0000000000..9c3d4c72bc --- /dev/null +++ b/examples/crd/pkg/resource.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package pkg
+
+import (
+	"fmt"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+)
+
+// ChaosPodSpec defines the desired state of ChaosPod
+type ChaosPodSpec struct {
+	Template corev1.PodTemplateSpec `json:"template"`
+	// +optional
+	NextStop metav1.Time `json:"nextStop,omitempty"`
+}
+
+// ChaosPodStatus defines the observed state of ChaosPod.
+// It should always be reconstructable from the state of the cluster and/or outside world.
+type ChaosPodStatus struct {
+	LastRun metav1.Time `json:"lastRun,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ChaosPod is the Schema for the randomjobs API
+// +kubebuilder:printcolumn:name="next stop",type="string",JSONPath=".spec.nextStop",format="date"
+// +kubebuilder:printcolumn:name="last run",type="string",JSONPath=".status.lastRun",format="date"
+type ChaosPod struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ChaosPodSpec   `json:"spec,omitempty"`
+	Status ChaosPodStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ChaosPodList contains a list of ChaosPod
+type ChaosPodList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ChaosPod `json:"items"`
+}
+
+// +kubebuilder:webhook:path=/validate-chaosapps-metamagical-io-v1-chaospod,mutating=false,failurePolicy=fail,groups=chaosapps.metamagical.io,resources=chaospods,verbs=create;update,versions=v1,name=vchaospod.kb.io
+
+var _ webhook.Validator = &ChaosPod{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (c *ChaosPod) ValidateCreate() error {
+	log.Info("validate create", "name", c.Name)
+
+	if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) {
+		return fmt.Errorf(".spec.nextStop must be later than current time")
+	}
+	return nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (c *ChaosPod) ValidateUpdate(old runtime.Object) error {
+	log.Info("validate update", "name", c.Name)
+
+	if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) {
+		return fmt.Errorf(".spec.nextStop must be later than current time")
+	}
+
+	oldC, ok := old.(*ChaosPod)
+	if !ok {
+		return fmt.Errorf("expect old object to be a %T instead of %T", oldC, old)
+	}
+	if c.Spec.NextStop.After(oldC.Spec.NextStop.Add(time.Hour)) {
+		return fmt.Errorf("it is not allowed to delay .spec.nextStop for more than 1 hour")
+	}
+	return nil
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (c *ChaosPod) ValidateDelete() error {
+	log.Info("validate delete", "name", c.Name)
+
+	if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) {
+		return fmt.Errorf(".spec.nextStop must be later than current time")
+	}
+	return nil
+}
+
+// +kubebuilder:webhook:path=/mutate-chaosapps-metamagical-io-v1-chaospod,mutating=true,failurePolicy=fail,groups=chaosapps.metamagical.io,resources=chaospods,verbs=create;update,versions=v1,name=mchaospod.kb.io
+
+var _ webhook.Defaulter = &ChaosPod{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (c *ChaosPod) Default() {
+	log.Info("default", "name", c.Name)
+
+	if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) {
+		c.Spec.NextStop = metav1.Time{Time: time.Now().Add(time.Minute)}
+	}
+}
+
+func init() {
+
SchemeBuilder.Register(&ChaosPod{}, &ChaosPodList{}) +} diff --git a/examples/crd/pkg/zz_generated.deepcopy.go b/examples/crd/pkg/zz_generated.deepcopy.go new file mode 100644 index 0000000000..cd506a87c0 --- /dev/null +++ b/examples/crd/pkg/zz_generated.deepcopy.go @@ -0,0 +1,121 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package pkg + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (c *ChaosPod) DeepCopyInto(out *ChaosPod) { + *out = *c + out.TypeMeta = c.TypeMeta + c.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + c.Spec.DeepCopyInto(&out.Spec) + c.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosPod. +func (c *ChaosPod) DeepCopy() *ChaosPod { + if c == nil { + return nil + } + out := new(ChaosPod) + c.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (c *ChaosPod) DeepCopyObject() runtime.Object { + if c := c.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChaosPodList) DeepCopyInto(out *ChaosPodList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ChaosPod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosPodList. +func (in *ChaosPodList) DeepCopy() *ChaosPodList { + if in == nil { + return nil + } + out := new(ChaosPodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ChaosPodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChaosPodSpec) DeepCopyInto(out *ChaosPodSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + in.NextStop.DeepCopyInto(&out.NextStop) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosPodSpec. +func (in *ChaosPodSpec) DeepCopy() *ChaosPodSpec { + if in == nil { + return nil + } + out := new(ChaosPodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChaosPodStatus) DeepCopyInto(out *ChaosPodStatus) { + *out = *in + in.LastRun.DeepCopyInto(&out.LastRun) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosPodStatus. +func (in *ChaosPodStatus) DeepCopy() *ChaosPodStatus { + if in == nil { + return nil + } + out := new(ChaosPodStatus) + in.DeepCopyInto(out) + return out +} diff --git a/examples/scratch-env/go.mod b/examples/scratch-env/go.mod new file mode 100644 index 0000000000..59da6b3d9e --- /dev/null +++ b/examples/scratch-env/go.mod @@ -0,0 +1,10 @@ +module sigs.k8s.io/controller-runtime/examples/scratch-env + +go 1.15 + +require ( + github.com/spf13/pflag v1.0.5 + sigs.k8s.io/controller-runtime v0.0.0-00010101000000-000000000000 +) + +replace sigs.k8s.io/controller-runtime => ../.. diff --git a/examples/scratch-env/go.sum b/examples/scratch-env/go.sum new file mode 100644 index 0000000000..ffafe85781 --- /dev/null +++ b/examples/scratch-env/go.sum @@ -0,0 +1,840 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod 
h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.2 h1:HFB2fbVIlhIfCfOW81bZFbiC/RvnpXSdhbF2/DJr134= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.12.0 h1:p4oGGk2M2UJc0wWN4lHFvIB71lxsh0T/UiKCCgFADY8= +github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang 
v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod 
h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod 
v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.1 h1:94bbZ5NTjdINJEdzOkpS4vdPhkb1VFpTYC9zh43f75c= +k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/apiextensions-apiserver v0.21.1 h1:AA+cnsb6w7SZ1vD32Z+zdgfXdXY8X9uGX5bN6EoPEIo= +k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= +k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/client-go v0.21.1 h1:bhblWYLZKUu+pm50plvQF8WpY6TXdRRtcS/K9WauOj4= +k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/component-base v0.21.1 h1:iLpj2btXbR326s/xNQWmPNGu0gaYSjzn7IN/5i28nQw= +k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210517184530-5a248b5acedc h1:cIS13bDBZaWqngldgGuDypv4z+zjcYgTKv72k6bMAn0= +k8s.io/utils v0.0.0-20210517184530-5a248b5acedc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/examples/scratch-env/main.go b/examples/scratch-env/main.go new file mode 100644 index 
0000000000..b8305ffed3 --- /dev/null +++ b/examples/scratch-env/main.go @@ -0,0 +1,132 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + goflag "flag" + "os" + + flag "github.com/spf13/pflag" + "go.uber.org/zap" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + crdPaths = flag.StringSlice("crd-paths", nil, "paths to files or directories containing CRDs to install on start") + webhookPaths = flag.StringSlice("webhook-paths", nil, "paths to files or directories containing webhook configurations to install on start") + attachControlPlaneOut = flag.Bool("debug-env", false, "attach to test env (apiserver & etcd) output -- just a convenience flag to force KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true") +) + +// have a separate function so we can return an exit code w/o skipping defers +func runMain() int { + loggerOpts := &logzap.Options{ + Development: true, // a sane default + ZapOpts: []zap.Option{zap.AddCaller()}, + } + { + var goFlagSet goflag.FlagSet + loggerOpts.BindFlags(&goFlagSet) + flag.CommandLine.AddGoFlagSet(&goFlagSet) + } + flag.Parse() + ctrl.SetLogger(logzap.New(logzap.UseFlagOptions(loggerOpts))) + ctrl.Log.Info("Starting...") + + log := ctrl.Log.WithName("main") + + env := &envtest.Environment{} + env.CRDInstallOptions.Paths = *crdPaths + env.WebhookInstallOptions.Paths = *webhookPaths + + if *attachControlPlaneOut { + os.Setenv("KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT", "true") + } + + log.Info("Starting apiserver & etcd") + cfg, err := env.Start() + if err != nil { + log.Error(err, "unable to start the test environment") + // shut down the environment in case we started it and failed while + // installing CRDs or provisioning users. + if err := env.Stop(); err != nil { + log.Error(err, "unable to stop the test environment after an error (this might be expected, but just thought you should know)") + } + return 1 + } + + log.Info("apiserver running", "host", cfg.Host) + + // NB(directxman12): this group is unfortunately named, but various + // kubernetes versions require us to use it to get "admin" access.
+ user, err := env.ControlPlane.AddUser(envtest.User{ + Name: "envtest-admin", + Groups: []string{"system:masters"}, + }, nil) + if err != nil { + log.Error(err, "unable to provision admin user") + return 1 + } + + // TODO(directxman12): add support for writing to a new context in an existing file + kubeconfigFile, err := os.CreateTemp("", "scratch-env-kubeconfig-") + if err != nil { + log.Error(err, "unable to create kubeconfig file") + return 1 + } + defer os.Remove(kubeconfigFile.Name()) + + { + log := log.WithValues("path", kubeconfigFile.Name()) + log.V(1).Info("Writing kubeconfig") + + kubeConfig, err := user.KubeConfig() + if err != nil { + log.Error(err, "unable to create kubeconfig") + } + + if _, err := kubeconfigFile.Write(kubeConfig); err != nil { + log.Error(err, "unable to save kubeconfig") + return 1 + } + + log.Info("Wrote kubeconfig") + } + + if opts := env.WebhookInstallOptions; opts.LocalServingPort != 0 { + log.Info("webhooks configured for", "host", opts.LocalServingHost, "port", opts.LocalServingPort, "dir", opts.LocalServingCertDir) + } + + ctx := ctrl.SetupSignalHandler() + <-ctx.Done() + + log.Info("Shutting down apiserver & etcd") + err = env.Stop() + if err != nil { + log.Error(err, "unable to stop the test environment") + return 1 + } + + log.Info("Shutdown successful") + return 0 +} + +func main() { + os.Exit(runMain()) +} diff --git a/examples/tokenreview/main.go b/examples/tokenreview/main.go new file mode 100644 index 0000000000..d018956f96 --- /dev/null +++ b/examples/tokenreview/main.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package main + +import ( + "os" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/webhook/authentication" +) + +func init() { + log.SetLogger(zap.New()) +} + +func main() { + entryLog := log.Log.WithName("entrypoint") + + // Setup a Manager + entryLog.Info("setting up manager") + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + entryLog.Error(err, "unable to set up overall controller manager") + os.Exit(1) + } + + // Setup webhooks + entryLog.Info("setting up webhook server") + hookServer := mgr.GetWebhookServer() + + entryLog.Info("registering webhooks to the webhook server") + hookServer.Register("/validate-v1-tokenreview", &authentication.Webhook{Handler: &authenticator{}}) + + entryLog.Info("starting manager") + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + entryLog.Error(err, "unable to run manager") + os.Exit(1) + } +} diff --git a/examples/tokenreview/tokenreview.go b/examples/tokenreview/tokenreview.go new file mode 100644 index 0000000000..cc64545e16 --- /dev/null +++ b/examples/tokenreview/tokenreview.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + + "sigs.k8s.io/controller-runtime/pkg/webhook/authentication" +) + +// authenticator validates tokenreviews +type authenticator struct { +} + +// authenticator admits a request by the token. 
+func (a *authenticator) Handle(ctx context.Context, req authentication.Request) authentication.Response { + if req.Spec.Token == "invalid" { + return authentication.Unauthenticated("invalid is an invalid token", v1.UserInfo{}) + } + return authentication.Authenticated("", v1.UserInfo{}) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..02d9b79580 --- /dev/null +++ b/go.mod @@ -0,0 +1,76 @@ +module sigs.k8s.io/controller-runtime + +go 1.17 + +require ( + github.com/evanphx/json-patch/v5 v5.6.0 + github.com/fsnotify/fsnotify v1.5.4 + github.com/go-logr/logr v1.2.3 + github.com/go-logr/zapr v1.2.3 + github.com/google/go-cmp v0.5.8 + github.com/onsi/ginkgo v1.16.5 + github.com/onsi/gomega v1.19.0 + github.com/prometheus/client_golang v1.12.2 + github.com/prometheus/client_model v0.2.0 + go.uber.org/goleak v1.1.12 + go.uber.org/zap v1.21.0 + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 + gomodules.xyz/jsonpatch/v2 v2.2.0 + k8s.io/api v0.25.0 + k8s.io/apiextensions-apiserver v0.25.0 + k8s.io/apimachinery v0.25.0 + k8s.io/client-go v0.25.0 + k8s.io/component-base v0.25.0 + k8s.io/klog/v2 v2.70.1 + k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed + sigs.k8s.io/yaml v1.3.0 +) + +require ( + cloud.google.com/go v0.97.0 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/kube-openapi 
v0.0.0-20220803162953-67bda5d908f1 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..93d6173c44 --- /dev/null +++ b/go.sum @@ -0,0 +1,1035 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go 
v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf 
v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjUQTriCw= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod 
h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure 
v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= 
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0= +go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w= +go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod 
h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api 
v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto 
v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 
v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= +k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= +k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= +k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= +k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= +k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= +k8s.io/apiserver v0.25.0/go.mod h1:BKwsE+PTC+aZK+6OJQDPr0v6uS91/HWxX7evElAH6xo= +k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= +k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= +k8s.io/code-generator v0.25.0/go.mod h1:B6jZgI3DvDFAualltPitbYMQ74NjaCFxum3YeKZZ+3w= +k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= +k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/apidiff.sh b/hack/apidiff.sh new file mode 100755 index 0000000000..0167486da1 --- /dev/null +++ b/hack/apidiff.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname ${BASH_SOURCE})/common.sh + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +cd "${REPO_ROOT}" + +APIDIFF="hack/tools/bin/go-apidiff" + +header_text "fetching and building go-apidiff" +make "${APIDIFF}" + +git status + +header_text "verifying api diff" +header_text "invoking: '${APIDIFF} ${PULL_BASE_SHA} --print-compatible'" +"${APIDIFF}" "${PULL_BASE_SHA}" --print-compatible diff --git a/hack/check-everything.sh b/hack/check-everything.sh new file mode 100755 index 0000000000..de559643cc --- /dev/null +++ b/hack/check-everything.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +hack_dir=$(dirname ${BASH_SOURCE}) +source ${hack_dir}/common.sh + +tmp_root=/tmp +kb_root_dir=$tmp_root/kubebuilder + +# Run verification scripts. +${hack_dir}/verify.sh + +# Envtest. +ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION:-"1.22.0"} + +header_text "installing envtest tools@${ENVTEST_K8S_VERSION} with setup-envtest if necessary" +tmp_bin=/tmp/cr-tests-bin +( + # don't presume to install for the user + cd ${hack_dir}/../tools/setup-envtest + GOBIN=${tmp_bin} go install . 
+) +export KUBEBUILDER_ASSETS="$(${tmp_bin}/setup-envtest use --use-env -p path "${ENVTEST_K8S_VERSION}")" + +# Run tests. +${hack_dir}/test-all.sh + +header_text "confirming examples compile (via go install)" +go install ${MOD_OPT} ./examples/builtins +go install ${MOD_OPT} ./examples/crd +go install ${MOD_OPT} ./examples/configfile/builtin +go install ${MOD_OPT} ./examples/configfile/custom + +echo "passed" +exit 0 diff --git a/hack/ci-check-everything.sh b/hack/ci-check-everything.sh new file mode 100755 index 0000000000..39b85e91d8 --- /dev/null +++ b/hack/ci-check-everything.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +export TRACE=1 + +# Not included or existing by default in Prow +export PATH=$(go env GOPATH)/bin:$PATH +mkdir -p $(go env GOPATH)/bin + +$(dirname ${BASH_SOURCE})/check-everything.sh diff --git a/hack/common.sh b/hack/common.sh new file mode 100755 index 0000000000..eff91ba7b7 --- /dev/null +++ b/hack/common.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# Enable tracing in this script off by setting the TRACE variable in your +# environment to any value: +# +# $ TRACE=1 test.sh +TRACE=${TRACE:-""} +if [ -n "$TRACE" ]; then + set -x +fi + +# check if modules are enabled +(go mod edit -json &>/dev/null) +MODULES_ENABLED=$? + +MOD_OPT="" +MODULES_OPT=${MODULES_OPT:-""} +if [[ -n "${MODULES_OPT}" && $MODULES_ENABLED ]]; then + MOD_OPT="-mod=${MODULES_OPT}" +fi + +# Turn colors in this script off by setting the NO_COLOR variable in your +# environment to any value: +# +# $ NO_COLOR=1 test.sh +NO_COLOR=${NO_COLOR:-""} +if [ -z "$NO_COLOR" ]; then + header=$'\e[1;33m' + reset=$'\e[0m' +else + header='' + reset='' +fi + +function header_text { + echo "$header$*$reset" +} diff --git a/hack/ensure-golangci-lint.sh b/hack/ensure-golangci-lint.sh new file mode 100755 index 0000000000..9e9ef03167 --- /dev/null +++ b/hack/ensure-golangci-lint.sh @@ -0,0 +1,422 @@ +#!/usr/bin/env bash + +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: This script is copied from from https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh. + +set -e + +usage() { + this=$1 + cat </dev/null +} +echoerr() { + echo "$@" 1>&2 +} +log_prefix() { + echo "$0" +} +_logp=6 +log_set_priority() { + _logp="$1" +} +log_priority() { + if test -z "$1"; then + echo "$_logp" + return + fi + [ "$1" -le "$_logp" ] +} +log_tag() { + case $1 in + 0) echo "emerg" ;; + 1) echo "alert" ;; + 2) echo "crit" ;; + 3) echo "err" ;; + 4) echo "warning" ;; + 5) echo "notice" ;; + 6) echo "info" ;; + 7) echo "debug" ;; + *) echo "$1" ;; + esac +} +log_debug() { + log_priority 7 || return 0 + echoerr "$(log_prefix)" "$(log_tag 7)" "$@" +} +log_info() { + log_priority 6 || return 0 + echoerr "$(log_prefix)" "$(log_tag 6)" "$@" +} +log_err() { + log_priority 3 || return 0 + echoerr "$(log_prefix)" "$(log_tag 3)" "$@" +} +log_crit() { + log_priority 2 || return 0 + echoerr "$(log_prefix)" "$(log_tag 2)" "$@" +} +uname_os() { + os=$(uname -s | tr '[:upper:]' '[:lower:]') + case "$os" in + cygwin_nt*) os="windows" ;; + mingw*) os="windows" ;; + msys_nt*) os="windows" ;; + esac + echo "$os" +} +uname_arch() { + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) arch="armv7" ;; + esac + echo ${arch} +} +uname_os_check() { + os=$(uname_os) + case "$os" in + darwin) return 0 ;; + dragonfly) return 0 ;; + freebsd) return 0 ;; + linux) return 0 ;; + android) return 0 ;; + nacl) return 0 ;; + netbsd) return 0 ;; + openbsd) return 0 ;; + plan9) return 0 ;; + solaris) return 0 ;; + windows) return 0 ;; + esac + log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib" + return 1 +} +uname_arch_check() { + arch=$(uname_arch) + case "$arch" in + 386) return 0 ;; + amd64) return 0 ;; + arm64) return 0 ;; + armv5) return 0 ;; + armv6) return 0 ;; + armv7) return 0 ;; + ppc64) return 0 ;; + ppc64le) return 0 ;; + mips) return 0 ;; + mipsle) return 0 ;; + mips64) return 0 ;; + mips64le) return 0 ;; + s390x) return 0 ;; + amd64p32) return 0 ;; + esac + log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. 
Please file bug report at https://github.com/client9/shlib" + return 1 +} +untar() { + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;; + *.tar) tar --no-same-owner -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} +http_download_curl() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url") + else + code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url") + fi + if [ "$code" != "200" ]; then + log_debug "http_download_curl received HTTP status $code" + return 1 + fi + return 0 +} +http_download_wget() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + wget -q -O "$local_file" "$source_url" + else + wget -q --header "$header" -O "$local_file" "$source_url" + fi +} +http_download() { + log_debug "http_download $2" + if is_command curl; then + http_download_curl "$@" + return + elif is_command wget; then + http_download_wget "$@" + return + fi + log_crit "http_download unable to find wget or curl" + return 1 +} +http_copy() { + tmp=$(mktemp) + http_download "${tmp}" "$1" "$2" || return 1 + body=$(cat "$tmp") + rm -f "${tmp}" + echo "$body" +} +github_release() { + owner_repo=$1 + version=$2 + test -z "$version" && version="latest" + giturl="https://github.com/${owner_repo}/releases/${version}" + json=$(http_copy "$giturl" "Accept:application/json") + test -z "$json" && return 1 + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + test -z "$version" && return 1 + echo "$version" +} +hash_sha256() { + TARGET=${1:-/dev/stdin} + if is_command gsha256sum; then + hash=$(gsha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command sha256sum; then + hash=$(sha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command shasum; then + hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command openssl; then + hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f a + else + log_crit "hash_sha256 unable to find command to compute sha-256 hash" + return 1 + fi +} +hash_sha256_verify() { + TARGET=$1 + checksums=$2 + if [ -z "$checksums" ]; then + log_err "hash_sha256_verify checksum file not specified in arg2" + return 1 + fi + BASENAME=${TARGET##*/} + want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) + if [ -z "$want" ]; then + log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" + return 1 + fi + got=$(hash_sha256 "$TARGET") + if [ "$want" != "$got" ]; then + log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" + return 1 + fi +} +cat /dev/null <' ${ARTIFACTS}/*; then exit 1; fi +fi diff --git a/hack/tools/go.mod b/hack/tools/go.mod new file mode 100644 index 0000000000..9f52c7e49c --- /dev/null +++ b/hack/tools/go.mod @@ -0,0 +1,8 @@ +module sigs.k8s.io/controller-runtime/hack/tools + +go 1.16 + +require ( + github.com/joelanford/go-apidiff v0.1.0 + sigs.k8s.io/controller-tools v0.7.0 +) diff --git a/hack/tools/go.sum b/hack/tools/go.sum new file mode 100644 index 0000000000..513b2e9254 --- /dev/null +++ b/hack/tools/go.sum @@ -0,0 +1,958 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/benbjohnson/clock v1.0.3/go.mod 
h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= +github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway 
v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joelanford/go-apidiff v0.1.0 h1:bt/247wfLDKFnCC5jYdapR3WY2laJMPB9apfc1U9Idw= +github.com/joelanford/go-apidiff v0.1.0/go.mod h1:wgVWgVCwYYkjcYpJtBnWYkyUYZfVovO3Y5pX49mJsqs= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod 
h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod 
h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191004183538-27eeabb02079/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod 
h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= +k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-tools v0.7.0 h1:iZIz1vEcavyEfxjcTLs1WH/MPf4vhPCtTKhoHqV8/G0= +sigs.k8s.io/controller-tools v0.7.0/go.mod h1:bpBAo0VcSDDLuWt47evLhMLPxRPxMDInTEH/YbdeMK0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= 
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/tools/tools.go b/hack/tools/tools.go new file mode 100644 index 0000000000..481a7c6f01 --- /dev/null +++ b/hack/tools/tools.go @@ -0,0 +1,25 @@ +// +build tools + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package imports things required by build scripts, to force `go mod` to see them as dependencies +package tools + +import ( + _ "github.com/joelanford/go-apidiff" + _ "sigs.k8s.io/controller-tools/cmd/controller-gen" +) diff --git a/hack/verify.sh b/hack/verify.sh new file mode 100755 index 0000000000..85006e3f06 --- /dev/null +++ b/hack/verify.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +source $(dirname ${BASH_SOURCE})/common.sh + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +cd "${REPO_ROOT}" + +header_text "running generate" +make generate + +header_text "running golangci-lint" +make lint + +# Only run module verification in CI, otherwise updating +# go module locally (which is a valid operation) causes `make test` to fail. +if [[ -n ${CI} ]]; then + header_text "verifying modules" + make modules verify-modules +fi diff --git a/pkg/builder/builder_suite_test.go b/pkg/builder/builder_suite_test.go new file mode 100644 index 0000000000..5ae6fd8616 --- /dev/null +++ b/pkg/builder/builder_suite_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +func TestBuilder(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "application Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + addCRDToEnvironment(testenv, + testDefaulterGVK, + testValidatorGVK, + testDefaultValidatorGVK) + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + // Prevent the metrics listener being created + metrics.DefaultBindAddress = "0" + + webhook.DefaultPort, _, err = addr.Suggest("") + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) + + // Put the DefaultBindAddress back + metrics.DefaultBindAddress = ":8080" + + // Change the webhook.DefaultPort back to the original default. + webhook.DefaultPort = 9443 +}) + +func addCRDToEnvironment(env *envtest.Environment, gvks ...schema.GroupVersionKind) { + for _, gvk := range gvks { + plural, singular := meta.UnsafeGuessKindToResource(gvk) + crd := &apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apiextensions.k8s.io/v1", + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: plural.Resource + "." + gvk.Group, + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: gvk.Group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: plural.Resource, + Singular: singular.Resource, + Kind: gvk.Kind, + }, + Scope: apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: gvk.Version, + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + } + env.CRDInstallOptions.CRDs = append(env.CRDInstallOptions.CRDs, crd) + } +} diff --git a/pkg/builder/controller.go b/pkg/builder/controller.go new file mode 100644 index 0000000000..efaf069205 --- /dev/null +++ b/pkg/builder/controller.go @@ -0,0 +1,333 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package builder
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/go-logr/logr"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// Supporting mocking out functions for testing.
+var newController = controller.New
+var getGvk = apiutil.GVKForObject
+
+// project represents other forms that we can use to
+// send/receive a given resource (metadata-only, unstructured, etc).
+type objectProjection int
+
+const (
+	// projectAsNormal doesn't change the object from the form given.
+	projectAsNormal objectProjection = iota
+	// projectAsMetadata turns this into a metadata-only watch.
+	projectAsMetadata
+)
+
+// Builder builds a Controller.
+type Builder struct {
+	forInput         ForInput
+	ownsInput        []OwnsInput
+	watchesInput     []WatchesInput
+	mgr              manager.Manager
+	globalPredicates []predicate.Predicate
+	ctrl             controller.Controller
+	ctrlOptions      controller.Options
+	name             string
+}
+
+// ControllerManagedBy returns a new controller builder that will be started by the provided Manager.
+func ControllerManagedBy(m manager.Manager) *Builder {
+	return &Builder{mgr: m}
+}
+
+// ForInput represents the information set by the For method.
+type ForInput struct {
+	object           client.Object
+	predicates       []predicate.Predicate
+	objectProjection objectProjection
+	err              error
+}
+
+// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete /
+// update events by *reconciling the object*.
+// This is the equivalent of calling
+// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}).
+func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder {
+	if blder.forInput.object != nil {
+		blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation")
+		return blder
+	}
+	input := ForInput{object: object}
+	for _, opt := range opts {
+		opt.ApplyToFor(&input)
+	}
+
+	blder.forInput = input
+	return blder
+}
+
+// OwnsInput represents the information set by the Owns method.
+type OwnsInput struct {
+	object           client.Object
+	predicates       []predicate.Predicate
+	objectProjection objectProjection
+}
+
+// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to
+// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling
+// Watches(&source.Kind{Type: <ForType-forInput>}, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}).
+func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder {
+	input := OwnsInput{object: object}
+	for _, opt := range opts {
+		opt.ApplyToOwns(&input)
+	}
+
+	blder.ownsInput = append(blder.ownsInput, input)
+	return blder
+}
+
+// WatchesInput represents the information set by the Watches method.
+type WatchesInput struct {
+	src              source.Source
+	eventhandler     handler.EventHandler
+	predicates       []predicate.Predicate
+	objectProjection objectProjection
+}
+
+// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using
+// Owns or For instead of Watches directly.
+// Specified predicates are registered only for the given source.
+func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder {
+	input := WatchesInput{src: src, eventhandler: eventhandler}
+	for _, opt := range opts {
+		opt.ApplyToWatches(&input)
+	}
+
+	blder.watchesInput = append(blder.watchesInput, input)
+	return blder
+}
+
+// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually
+// trigger reconciliations. For example, filtering on whether the resource version has changed.
+// The given predicate is added for all watched objects.
+// Defaults to the empty list.
+func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder {
+	blder.globalPredicates = append(blder.globalPredicates, p)
+	return blder
+}
+
+// WithOptions overrides the controller options used in doController. Defaults to empty.
+func (blder *Builder) WithOptions(options controller.Options) *Builder {
+	blder.ctrlOptions = options
+	return blder
+}
+
+// WithLogConstructor overrides the controller options' LogConstructor.
+func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) *Builder {
+	blder.ctrlOptions.LogConstructor = logConstructor
+	return blder
+}
+
+// Named sets the name of the controller to the given name. The name shows up
+// in metrics, among other things, and thus should be a Prometheus-compatible name
+// (underscores and alphanumeric characters only).
+//
+// By default, controllers are named using the lowercase version of their kind.
+func (blder *Builder) Named(name string) *Builder {
+	blder.name = name
+	return blder
+}
+
+// Complete builds the Application Controller.
+func (blder *Builder) Complete(r reconcile.Reconciler) error {
+	_, err := blder.Build(r)
+	return err
+}
+
+// Build builds the Application Controller and returns the Controller it created.
+func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) { + if r == nil { + return nil, fmt.Errorf("must provide a non-nil Reconciler") + } + if blder.mgr == nil { + return nil, fmt.Errorf("must provide a non-nil Manager") + } + if blder.forInput.err != nil { + return nil, blder.forInput.err + } + // Checking the reconcile type exist or not + if blder.forInput.object == nil { + return nil, fmt.Errorf("must provide an object for reconciliation") + } + + // Set the ControllerManagedBy + if err := blder.doController(r); err != nil { + return nil, err + } + + // Set the Watch + if err := blder.doWatch(); err != nil { + return nil, err + } + + return blder.ctrl, nil +} + +func (blder *Builder) project(obj client.Object, proj objectProjection) (client.Object, error) { + switch proj { + case projectAsNormal: + return obj, nil + case projectAsMetadata: + metaObj := &metav1.PartialObjectMetadata{} + gvk, err := getGvk(obj, blder.mgr.GetScheme()) + if err != nil { + return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err) + } + metaObj.SetGroupVersionKind(gvk) + return metaObj, nil + default: + panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj)) + } +} + +func (blder *Builder) doWatch() error { + // Reconcile type + typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} + hdler := &handler.EnqueueRequestForObject{} + allPredicates := append(blder.globalPredicates, blder.forInput.predicates...) + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + return err + } + + // Watches the managed types + for _, own := range blder.ownsInput { + typeForSrc, err := blder.project(own.object, own.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} + hdler := &handler.EnqueueRequestForOwner{ + OwnerType: blder.forInput.object, + IsController: true, + } + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, own.predicates...) + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + return err + } + } + + // Do the watch requests + for _, w := range blder.watchesInput { + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, w.predicates...) + + // If the source of this watch is of type *source.Kind, project it. + if srckind, ok := w.src.(*source.Kind); ok { + typeForSrc, err := blder.project(srckind.Type, w.objectProjection) + if err != nil { + return err + } + srckind.Type = typeForSrc + } + + if err := blder.ctrl.Watch(w.src, w.eventhandler, allPredicates...); err != nil { + return err + } + } + return nil +} + +func (blder *Builder) getControllerName(gvk schema.GroupVersionKind) string { + if blder.name != "" { + return blder.name + } + return strings.ToLower(gvk.Kind) +} + +func (blder *Builder) doController(r reconcile.Reconciler) error { + globalOpts := blder.mgr.GetControllerOptions() + + ctrlOptions := blder.ctrlOptions + if ctrlOptions.Reconciler == nil { + ctrlOptions.Reconciler = r + } + + // Retrieve the GVK from the object we're reconciling + // to prepopulate logger information, and to optionally generate a default name. 
+ gvk, err := getGvk(blder.forInput.object, blder.mgr.GetScheme()) + if err != nil { + return err + } + + // Setup concurrency. + if ctrlOptions.MaxConcurrentReconciles == 0 { + groupKind := gvk.GroupKind().String() + + if concurrency, ok := globalOpts.GroupKindConcurrency[groupKind]; ok && concurrency > 0 { + ctrlOptions.MaxConcurrentReconciles = concurrency + } + } + + // Setup cache sync timeout. + if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout != nil { + ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout + } + + controllerName := blder.getControllerName(gvk) + + // Setup the logger. + if ctrlOptions.LogConstructor == nil { + log := blder.mgr.GetLogger().WithValues( + "controller", controllerName, + "controllerGroup", gvk.Group, + "controllerKind", gvk.Kind, + ) + + ctrlOptions.LogConstructor = func(req *reconcile.Request) logr.Logger { + log := log + if req != nil { + log = log.WithValues( + gvk.Kind, klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + // Build the controller and return. + blder.ctrl, err = newController(controllerName, blder.mgr, ctrlOptions) + return err +} diff --git a/pkg/builder/controller_test.go b/pkg/builder/controller_test.go new file mode 100644 index 0000000000..56c1a41458 --- /dev/null +++ b/pkg/builder/controller_test.go @@ -0,0 +1,628 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +type typedNoop struct{} + +func (typedNoop) Reconcile(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +type testLogger struct { + logr.Logger +} + +func (l *testLogger) Init(logr.RuntimeInfo) { +} + +func (l *testLogger) Enabled(int) bool { + return true +} + +func (l *testLogger) Info(level int, msg string, keysAndValues ...interface{}) { +} + +func (l *testLogger) WithValues(keysAndValues ...interface{}) logr.LogSink { + return l +} + +func (l *testLogger) WithName(name string) logr.LogSink { + return l +} + +var _ = Describe("application", func() { + BeforeEach(func() { + newController = controller.New + }) + + noop := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil + }) + + Describe("New", func() { + It("should return success if given valid objects", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should return error if given two apiType objects in For function", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + For(&appsv1.Deployment{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("For(...) should only be called once, could not assign multiple objects for reconciliation"))) + Expect(instance).To(BeNil()) + }) + + It("should return an error if For function is not called", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("must provide an object for reconciliation"))) + Expect(instance).To(BeNil()) + }) + + It("should return an error if there is no GVK for an object, and thus we can't default the controller name", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a controller with a bad For type") + instance, err := ControllerManagedBy(m). + For(&fakeType{}). + Owns(&appsv1.ReplicaSet{}). 
+ Build(noop) + Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type builder.fakeType"))) + Expect(instance).To(BeNil()) + + // NB(directxman12): we don't test non-for types, since errors for + // them now manifest on controller.Start, not controller.Watch. Errors on the For type + // manifest when we try to default the controller name, which is good to double check. + }) + + It("should return an error if it cannot create the controller", func() { + newController = func(name string, mgr manager.Manager, options controller.Options) ( + controller.Controller, error) { + return nil, fmt.Errorf("expected error") + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + Expect(instance).To(BeNil()) + }) + + It("should override max concurrent reconcilers during creation of controller", func() { + const maxConcurrentReconciles = 5 + newController = func(name string, mgr manager.Manager, options controller.Options) ( + controller.Controller, error) { + if options.MaxConcurrentReconciles == maxConcurrentReconciles { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("max concurrent reconcilers expected %d but found %d", maxConcurrentReconciles, options.MaxConcurrentReconciles) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should override max concurrent reconcilers during creation of controller, when using", func() { + const maxConcurrentReconciles = 10 + newController = func(name string, mgr manager.Manager, options controller.Options) ( + controller.Controller, error) { + if options.MaxConcurrentReconciles == maxConcurrentReconciles { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("max concurrent reconcilers expected %d but found %d", maxConcurrentReconciles, options.MaxConcurrentReconciles) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{ + Controller: v1alpha1.ControllerConfigurationSpec{ + GroupKindConcurrency: map[string]int{ + "ReplicaSet.apps": maxConcurrentReconciles, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should override rate limiter during creation of controller", func() { + rateLimiter := workqueue.DefaultItemBasedRateLimiter() + newController = func(name string, mgr manager.Manager, options controller.Options) (controller.Controller, error) { + if options.RateLimiter == rateLimiter { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("rate limiter expected %T but found %T", rateLimiter, options.RateLimiter) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). 
+ For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + WithOptions(controller.Options{RateLimiter: rateLimiter}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should override logger during creation of controller", func() { + + logger := &testLogger{} + newController = func(name string, mgr manager.Manager, options controller.Options) (controller.Controller, error) { + if options.LogConstructor(nil).GetSink() == logger { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("logger expected %T but found %T", logger, options.LogConstructor) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + WithLogConstructor(func(request *reconcile.Request) logr.Logger { + return logr.New(logger) + }). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should prefer reconciler from options during creation of controller", func() { + newController = func(name string, mgr manager.Manager, options controller.Options) (controller.Controller, error) { + if options.Reconciler != (typedNoop{}) { + return nil, fmt.Errorf("Custom reconciler expected %T but found %T", typedNoop{}, options.Reconciler) + } + return controller.New(name, mgr, options) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). + WithOptions(controller.Options{Reconciler: typedNoop{}}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should allow multiple controllers for the same kind", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaultValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + Expect(err).NotTo(HaveOccurred()) + + By("creating the 1st controller") + ctrl1, err := ControllerManagedBy(m). + For(&TestDefaultValidator{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(ctrl1).NotTo(BeNil()) + + By("creating the 2nd controller") + ctrl2, err := ControllerManagedBy(m). + For(&TestDefaultValidator{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(ctrl2).NotTo(BeNil()) + }) + }) + + Describe("Start with ControllerManagedBy", func() { + It("should Reconcile Owns objects", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}). + Owns(&appsv1.ReplicaSet{}) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + doReconcileTest(ctx, "3", m, false, bldr) + }, 10) + + It("should Reconcile Watches objects", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}). 
+ Watches( // Equivalent of Owns + &source.Kind{Type: &appsv1.ReplicaSet{}}, + &handler.EnqueueRequestForOwner{OwnerType: &appsv1.Deployment{}, IsController: true}) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + doReconcileTest(ctx, "4", m, true, bldr) + }, 10) + }) + + Describe("Set custom predicates", func() { + It("should execute registered predicates only for assigned kind", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + var ( + deployPrctExecuted = false + replicaSetPrctExecuted = false + allPrctExecuted = int64(0) + ) + + deployPrct := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + defer GinkgoRecover() + // check that it was called only for deployment + Expect(e.Object).To(BeAssignableToTypeOf(&appsv1.Deployment{})) + deployPrctExecuted = true + return true + }, + } + + replicaSetPrct := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + defer GinkgoRecover() + // check that it was called only for replicaset + Expect(e.Object).To(BeAssignableToTypeOf(&appsv1.ReplicaSet{})) + replicaSetPrctExecuted = true + return true + }, + } + + allPrct := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + defer GinkgoRecover() + // check that it was called for all registered kinds + Expect(e.Object).Should(Or( + BeAssignableToTypeOf(&appsv1.Deployment{}), + BeAssignableToTypeOf(&appsv1.ReplicaSet{}), + )) + + atomic.AddInt64(&allPrctExecuted, 1) + return true + }, + } + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}, WithPredicates(deployPrct)). + Owns(&appsv1.ReplicaSet{}, WithPredicates(replicaSetPrct)). + WithEventFilter(allPrct) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + doReconcileTest(ctx, "5", m, true, bldr) + + Expect(deployPrctExecuted).To(BeTrue(), "Deploy predicated should be called at least once") + Expect(replicaSetPrctExecuted).To(BeTrue(), "ReplicaSet predicated should be called at least once") + Expect(allPrctExecuted).To(BeNumerically(">=", 2), "Global Predicated should be called at least twice") + }) + }) + + Describe("watching with projections", func() { + var mgr manager.Manager + BeforeEach(func() { + // use a cache that intercepts requests for fully typed objects to + // ensure we use the projected versions + var err error + mgr, err = manager.New(cfg, manager.Options{NewCache: newNonTypedOnlyCache}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should support multiple controllers watching the same metadata kind", func() { + bldr1 := ControllerManagedBy(mgr).For(&appsv1.Deployment{}, OnlyMetadata) + bldr2 := ControllerManagedBy(mgr).For(&appsv1.Deployment{}, OnlyMetadata) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + doReconcileTest(ctx, "6", mgr, true, bldr1, bldr2) + }) + + It("should support watching For, Owns, and Watch as metadata", func() { + statefulSetMaps := make(chan *metav1.PartialObjectMetadata) + + bldr := ControllerManagedBy(mgr). + For(&appsv1.Deployment{}, OnlyMetadata). + Owns(&appsv1.ReplicaSet{}, OnlyMetadata). + Watches(&source.Kind{Type: &appsv1.StatefulSet{}}, + handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { + defer GinkgoRecover() + + ometa := o.(*metav1.PartialObjectMetadata) + statefulSetMaps <- ometa + + // Validate that the GVK is not empty when dealing with PartialObjectMetadata objects. 
+ Expect(o.GetObjectKind().GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "StatefulSet", + })) + return nil + }), + OnlyMetadata) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + doReconcileTest(ctx, "8", mgr, true, bldr) + + By("Creating a new stateful set") + set := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + err := mgr.GetClient().Create(context.TODO(), set) + Expect(err).NotTo(HaveOccurred()) + + By("Checking that the mapping function has been called") + Eventually(func() bool { + metaSet := <-statefulSetMaps + Expect(metaSet.Name).To(Equal(set.Name)) + Expect(metaSet.Namespace).To(Equal(set.Namespace)) + Expect(metaSet.Labels).To(Equal(set.Labels)) + return true + }).Should(BeTrue()) + }) + }) +}) + +// newNonTypedOnlyCache returns a new cache that wraps the normal cache, +// returning an error if normal, typed objects have informers requested. +func newNonTypedOnlyCache(config *rest.Config, opts cache.Options) (cache.Cache, error) { + normalCache, err := cache.New(config, opts) + if err != nil { + return nil, err + } + return &nonTypedOnlyCache{ + Cache: normalCache, + }, nil +} + +// nonTypedOnlyCache is a cache.Cache that only provides metadata & +// unstructured informers. +type nonTypedOnlyCache struct { + cache.Cache +} + +func (c *nonTypedOnlyCache) GetInformer(ctx context.Context, obj client.Object) (cache.Informer, error) { + switch obj.(type) { + case (*metav1.PartialObjectMetadata): + return c.Cache.GetInformer(ctx, obj) + default: + return nil, fmt.Errorf("did not want to provide an informer for normal type %T", obj) + } +} +func (c *nonTypedOnlyCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (cache.Informer, error) { + return nil, fmt.Errorf("don't try to sidestep the restriction on informer types by calling GetInformerForKind") +} + +// TODO(directxman12): this function has too many arguments, and the whole +// "nameSuffix" think is a bit of a hack It should be cleaned up significantly by someone with a bit of time. +func doReconcileTest(ctx context.Context, nameSuffix string, mgr manager.Manager, complete bool, blders ...*Builder) { + deployName := "deploy-name-" + nameSuffix + rsName := "rs-name-" + nameSuffix + + By("Creating the application") + ch := make(chan reconcile.Request) + fn := reconcile.Func(func(_ context.Context, req reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + if !strings.HasSuffix(req.Name, nameSuffix) { + // From different test, ignore this request. Etcd is shared across tests. 
+ return reconcile.Result{}, nil + } + ch <- req + return reconcile.Result{}, nil + }) + + for _, blder := range blders { + if complete { + err := blder.Complete(fn) + Expect(err).NotTo(HaveOccurred()) + } else { + var err error + var c controller.Controller + c, err = blder.Build(fn) + Expect(err).NotTo(HaveOccurred()) + Expect(c).NotTo(BeNil()) + } + } + + By("Starting the application") + go func() { + defer GinkgoRecover() + Expect(mgr.Start(ctx)).NotTo(HaveOccurred()) + By("Stopping the application") + }() + + By("Creating a Deployment") + // Expect a Reconcile when the Deployment is managedObjects. + dep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: deployName, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + err := mgr.GetClient().Create(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for the Deployment Reconcile") + Eventually(ch).Should(Receive(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))) + + By("Creating a ReplicaSet") + // Expect a Reconcile when an Owned object is managedObjects. + t := true + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: rsName, + Labels: dep.Spec.Selector.MatchLabels, + OwnerReferences: []metav1.OwnerReference{ + { + Name: deployName, + Kind: "Deployment", + APIVersion: "apps/v1", + Controller: &t, + UID: dep.UID, + }, + }, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: dep.Spec.Selector, + Template: dep.Spec.Template, + }, + } + err = mgr.GetClient().Create(context.TODO(), rs) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for the ReplicaSet Reconcile") + Eventually(ch).Should(Receive(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))) +} + +var _ runtime.Object = &fakeType{} + +type fakeType struct { + metav1.TypeMeta + metav1.ObjectMeta +} + +func (*fakeType) GetObjectKind() schema.ObjectKind { return nil } +func (*fakeType) DeepCopyObject() runtime.Object { return nil } diff --git a/pkg/builder/doc.go b/pkg/builder/doc.go new file mode 100644 index 0000000000..e4df1b709f --- /dev/null +++ b/pkg/builder/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package builder wraps other controller-runtime libraries and exposes simple +// patterns for building common Controllers. +// +// Projects built with the builder package can trivially be rebased on top of the underlying +// packages if the project requires more customized behavior in the future. 
+package builder + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("builder") diff --git a/pkg/builder/example_test.go b/pkg/builder/example_test.go new file mode 100644 index 0000000000..955c46b562 --- /dev/null +++ b/pkg/builder/example_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder_test + +import ( + "context" + "fmt" + "os" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func ExampleBuilder_metadata_only() { + logf.SetLogger(zap.New()) + + var log = logf.Log.WithName("builder-examples") + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + cl := mgr.GetClient() + err = builder. + ControllerManagedBy(mgr). // Create the ControllerManagedBy + For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API + Owns(&corev1.Pod{}, builder.OnlyMetadata). // ReplicaSet owns Pods created by it, and caches them as metadata only + Complete(reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // Read the ReplicaSet + rs := &appsv1.ReplicaSet{} + err := cl.Get(ctx, req.NamespacedName, rs) + if err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + // List the Pods matching the PodTemplate Labels, but only their metadata + var podsMeta metav1.PartialObjectMetadataList + err = cl.List(ctx, &podsMeta, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) + if err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + // Update the ReplicaSet + rs.Labels["pod-count"] = fmt.Sprintf("%v", len(podsMeta.Items)) + err = cl.Update(ctx, rs) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil + })) + if err != nil { + log.Error(err, "could not create controller") + os.Exit(1) + } + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} + +// This example creates a simple application ControllerManagedBy that is configured for ReplicaSets and Pods. +// +// * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into +// ReplicaSetReconciler. +// +// * Start the application. 
+func ExampleBuilder() { + logf.SetLogger(zap.New()) + + var log = logf.Log.WithName("builder-examples") + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + err = builder. + ControllerManagedBy(mgr). // Create the ControllerManagedBy + For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API + Owns(&corev1.Pod{}). // ReplicaSet owns Pods created by it + Complete(&ReplicaSetReconciler{}) + if err != nil { + log.Error(err, "could not create controller") + os.Exit(1) + } + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} + +// ReplicaSetReconciler is a simple ControllerManagedBy example implementation. +type ReplicaSetReconciler struct { + client.Client +} + +// Implement the business logic: +// This function will be called when there is a change to a ReplicaSet or a Pod with an OwnerReference +// to a ReplicaSet. +// +// * Read the ReplicaSet +// * Read the Pods +// * Set a Label on the ReplicaSet with the Pod count. +func (a *ReplicaSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // Read the ReplicaSet + rs := &appsv1.ReplicaSet{} + err := a.Get(ctx, req.NamespacedName, rs) + if err != nil { + return reconcile.Result{}, err + } + + // List the Pods matching the PodTemplate Labels + pods := &corev1.PodList{} + err = a.List(ctx, pods, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) + if err != nil { + return reconcile.Result{}, err + } + + // Update the ReplicaSet + rs.Labels["pod-count"] = fmt.Sprintf("%v", len(pods.Items)) + err = a.Update(ctx, rs) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +func (a *ReplicaSetReconciler) InjectClient(c client.Client) error { + a.Client = c + return nil +} diff --git a/pkg/builder/example_webhook_test.go b/pkg/builder/example_webhook_test.go new file mode 100644 index 0000000000..63333a2478 --- /dev/null +++ b/pkg/builder/example_webhook_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder_test + +import ( + "os" + + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + examplegroup "sigs.k8s.io/controller-runtime/examples/crd/pkg" +) + +// examplegroup.ChaosPod has implemented both admission.Defaulter and +// admission.Validator interfaces. +var _ admission.Defaulter = &examplegroup.ChaosPod{} +var _ admission.Validator = &examplegroup.ChaosPod{} + +// This example use webhook builder to create a simple webhook that is managed +// by a manager for CRD ChaosPod. And then start the manager. 
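+//
+// For reference, implementing those interfaces looks roughly like the sketch
+// below (illustrative only; the real ChaosPod methods live in the examples
+// package and contain actual defaulting/validation logic):
+//
+//	func (c *ChaosPod) Default()                                {}
+//	func (c *ChaosPod) ValidateCreate() error                   { return nil }
+//	func (c *ChaosPod) ValidateUpdate(old runtime.Object) error { return nil }
+//	func (c *ChaosPod) ValidateDelete() error                   { return nil }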
+func ExampleWebhookBuilder() { + var log = logf.Log.WithName("webhookbuilder-example") + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + err = builder. + WebhookManagedBy(mgr). // Create the WebhookManagedBy + For(&examplegroup.ChaosPod{}). // ChaosPod is a CRD. + Complete() + if err != nil { + log.Error(err, "could not create webhook") + os.Exit(1) + } + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} diff --git a/pkg/builder/options.go b/pkg/builder/options.go new file mode 100644 index 0000000000..c738ba7d10 --- /dev/null +++ b/pkg/builder/options.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// {{{ "Functional" Option Interfaces + +// ForOption is some configuration that modifies options for a For request. +type ForOption interface { + // ApplyToFor applies this configuration to the given for input. + ApplyToFor(*ForInput) +} + +// OwnsOption is some configuration that modifies options for a owns request. +type OwnsOption interface { + // ApplyToOwns applies this configuration to the given owns input. + ApplyToOwns(*OwnsInput) +} + +// WatchesOption is some configuration that modifies options for a watches request. +type WatchesOption interface { + // ApplyToWatches applies this configuration to the given watches options. + ApplyToWatches(*WatchesInput) +} + +// }}} + +// {{{ Multi-Type Options + +// WithPredicates sets the given predicates list. +func WithPredicates(predicates ...predicate.Predicate) Predicates { + return Predicates{ + predicates: predicates, + } +} + +// Predicates filters events before enqueuing the keys. +type Predicates struct { + predicates []predicate.Predicate +} + +// ApplyToFor applies this configuration to the given ForInput options. +func (w Predicates) ApplyToFor(opts *ForInput) { + opts.predicates = w.predicates +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (w Predicates) ApplyToOwns(opts *OwnsInput) { + opts.predicates = w.predicates +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. +func (w Predicates) ApplyToWatches(opts *WatchesInput) { + opts.predicates = w.predicates +} + +var _ ForOption = &Predicates{} +var _ OwnsOption = &Predicates{} +var _ WatchesOption = &Predicates{} + +// }}} + +// {{{ For & Owns Dual-Type options + +// asProjection configures the projection (currently only metadata) on the input. +// Currently only metadata is supported. We might want to expand +// this to arbitrary non-special local projections in the future. +type projectAs objectProjection + +// ApplyToFor applies this configuration to the given ForInput options. 
+func (p projectAs) ApplyToFor(opts *ForInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (p projectAs) ApplyToOwns(opts *OwnsInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. +func (p projectAs) ApplyToWatches(opts *WatchesInput) { + opts.objectProjection = objectProjection(p) +} + +var ( + // OnlyMetadata tells the controller to *only* cache metadata, and to watch + // the the API server in metadata-only form. This is useful when watching + // lots of objects, really big objects, or objects for which you only know + // the the GVK, but not the structure. You'll need to pass + // metav1.PartialObjectMetadata to the client when fetching objects in your + // reconciler, otherwise you'll end up with a duplicate structured or + // unstructured cache. + // + // When watching a resource with OnlyMetadata, for example the v1.Pod, you + // should not Get and List using the v1.Pod type. Instead, you should use + // the special metav1.PartialObjectMetadata type. + // + // ❌ Incorrect: + // + // pod := &v1.Pod{} + // mgr.GetClient().Get(ctx, nsAndName, pod) + // + // ✅ Correct: + // + // pod := &metav1.PartialObjectMetadata{} + // pod.SetGroupVersionKind(schema.GroupVersionKind{ + // Group: "", + // Version: "v1", + // Kind: "Pod", + // }) + // mgr.GetClient().Get(ctx, nsAndName, pod) + // + // In the first case, controller-runtime will create another cache for the + // concrete type on top of the metadata cache; this increases memory + // consumption and leads to race conditions as caches are not in sync. + OnlyMetadata = projectAs(projectAsMetadata) + + _ ForOption = OnlyMetadata + _ OwnsOption = OnlyMetadata + _ WatchesOption = OnlyMetadata +) + +// }}} diff --git a/pkg/builder/webhook.go b/pkg/builder/webhook.go new file mode 100644 index 0000000000..534e6d64cd --- /dev/null +++ b/pkg/builder/webhook.go @@ -0,0 +1,216 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "errors" + "net/http" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// WebhookBuilder builds a Webhook. +type WebhookBuilder struct { + apiType runtime.Object + withDefaulter admission.CustomDefaulter + withValidator admission.CustomValidator + gvk schema.GroupVersionKind + mgr manager.Manager + config *rest.Config + recoverPanic bool +} + +// WebhookManagedBy allows inform its manager.Manager. +func WebhookManagedBy(m manager.Manager) *WebhookBuilder { + return &WebhookBuilder{mgr: m} +} + +// TODO(droot): update the GoDoc for conversion. + +// For takes a runtime.Object which should be a CR. 
+// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type.
+// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder {
+	blder.apiType = apiType
+	return blder
+}
+
+// WithDefaulter takes an admission.CustomDefaulter interface; a MutatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) WithDefaulter(defaulter admission.CustomDefaulter) *WebhookBuilder {
+	blder.withDefaulter = defaulter
+	return blder
+}
+
+// WithValidator takes an admission.CustomValidator interface; a ValidatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) *WebhookBuilder {
+	blder.withValidator = validator
+	return blder
+}
+
+// RecoverPanic indicates whether panics caused by the webhook should be recovered.
+func (blder *WebhookBuilder) RecoverPanic() *WebhookBuilder {
+	blder.recoverPanic = true
+	return blder
+}
+
+// Complete builds the webhook.
+func (blder *WebhookBuilder) Complete() error {
+	// Set the Config
+	blder.loadRestConfig()
+
+	// Set the Webhook if needed
+	return blder.registerWebhooks()
+}
+
+func (blder *WebhookBuilder) loadRestConfig() {
+	if blder.config == nil {
+		blder.config = blder.mgr.GetConfig()
+	}
+}
+
+func (blder *WebhookBuilder) registerWebhooks() error {
+	typ, err := blder.getType()
+	if err != nil {
+		return err
+	}
+
+	// Create webhook(s) for each type
+	blder.gvk, err = apiutil.GVKForObject(typ, blder.mgr.GetScheme())
+	if err != nil {
+		return err
+	}
+
+	blder.registerDefaultingWebhook()
+	blder.registerValidatingWebhook()
+
+	err = blder.registerConversionWebhook()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// registerDefaultingWebhook registers a defaulting webhook if needed.
+func (blder *WebhookBuilder) registerDefaultingWebhook() {
+	mwh := blder.getDefaultingWebhook()
+	if mwh != nil {
+		path := generateMutatePath(blder.gvk)
+
+		// Checking if the path is already registered.
+		// If so, just skip it.
+		if !blder.isAlreadyHandled(path) {
+			log.Info("Registering a mutating webhook",
+				"GVK", blder.gvk,
+				"path", path)
+			blder.mgr.GetWebhookServer().Register(path, mwh)
+		}
+	}
+}
+
+func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook {
+	if defaulter := blder.withDefaulter; defaulter != nil {
+		return admission.WithCustomDefaulter(blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic)
+	}
+	if defaulter, ok := blder.apiType.(admission.Defaulter); ok {
+		return admission.DefaultingWebhookFor(defaulter).WithRecoverPanic(blder.recoverPanic)
+	}
+	log.Info(
+		"skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called",
+		"GVK", blder.gvk)
+	return nil
+}
+
+func (blder *WebhookBuilder) registerValidatingWebhook() {
+	vwh := blder.getValidatingWebhook()
+	if vwh != nil {
+		path := generateValidatePath(blder.gvk)
+
+		// Checking if the path is already registered.
+		// If so, just skip it.
+ if !blder.isAlreadyHandled(path) { + log.Info("Registering a validating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, vwh) + } + } +} + +func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook { + if validator := blder.withValidator; validator != nil { + return admission.WithCustomValidator(blder.apiType, validator).WithRecoverPanic(blder.recoverPanic) + } + if validator, ok := blder.apiType.(admission.Validator); ok { + return admission.ValidatingWebhookFor(validator).WithRecoverPanic(blder.recoverPanic) + } + log.Info( + "skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called", + "GVK", blder.gvk) + return nil +} + +func (blder *WebhookBuilder) registerConversionWebhook() error { + ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType) + if err != nil { + log.Error(err, "conversion check failed", "GVK", blder.gvk) + return err + } + if ok { + if !blder.isAlreadyHandled("/convert") { + blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + } + log.Info("Conversion webhook enabled", "GVK", blder.gvk) + } + + return nil +} + +func (blder *WebhookBuilder) getType() (runtime.Object, error) { + if blder.apiType != nil { + return blder.apiType, nil + } + return nil, errors.New("For() must be called with a valid object") +} + +func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { + if blder.mgr.GetWebhookServer().WebhookMux == nil { + return false + } + h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + if p == path && h != nil { + return true + } + return false +} + +func generateMutatePath(gvk schema.GroupVersionKind) string { + return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} + +func generateValidatePath(gvk schema.GroupVersionKind) string { + return "/validate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} diff --git a/pkg/builder/webhook_test.go b/pkg/builder/webhook_test.go new file mode 100644 index 0000000000..d4f74d15b1 --- /dev/null +++ b/pkg/builder/webhook_test.go @@ -0,0 +1,921 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ = Describe("webhook", func() { + Describe("New", func() { + Context("v1 AdmissionReview", func() { + runTests("v1") + }) + Context("v1beta1 AdmissionReview", func() { + runTests("v1beta1") + }) + }) +}) + +func runTests(admissionReviewVersion string) { + var stop chan struct{} + + BeforeEach(func() { + stop = make(chan struct{}) + newController = controller.New + }) + + AfterEach(func() { + close(stop) + }) + + It("should scaffold a defaulting webhook if the type implements the Defaulter interface", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaulter{}). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestDefaulter" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testdefaulter" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":1 + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaulterGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + + By("sending a request to a validating webhook path that doesn't exist") + path = generateValidatePath(testDefaulterGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + }) + + It("should scaffold a defaulting webhook which recovers from panics", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaulter{Panic: true}). + RecoverPanic(). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestDefaulter" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testdefaulter" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":1, + "panic":true + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaulterGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":500`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"message":"panic: injected panic [recovered]`)) + }) + + It("should scaffold a defaulting webhook with a custom defaulter", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + WithDefaulter(&TestCustomDefaulter{}). + For(&TestDefaulter{}). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestDefaulter" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testdefaulter" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":1 + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaulterGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + + By("sending a request to a validating webhook path that doesn't exist") + path = generateValidatePath(testDefaulterGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + }) + + It("should scaffold a validating webhook if the type implements the Validator interface", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{}). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":2 + } + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path that doesn't exist") + path := generateMutatePath(testValidatorGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + + By("sending a request to a validating webhook path") + path = generateValidatePath(testValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + }) + + It("should scaffold a validating webhook which recovers from panics", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{Panic: true}). + RecoverPanic(). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":2, + "panic":true + } + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a validating webhook path") + path := generateValidatePath(testValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":500`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"message":"panic: injected panic [recovered]`)) + }) + + It("should scaffold a validating webhook with a custom validator", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + WithValidator(&TestCustomValidator{}). + For(&TestValidator{}). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":2 + } + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path that doesn't exist") + path := generateMutatePath(testValidatorGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + + By("sending a request to a validating webhook path") + path = generateValidatePath(testValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + }) + + It("should scaffold defaulting and validating webhooks if the type implements both Defaulter and Validator interfaces", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaultValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaultValidator{}). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestDefaultValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testdefaultvalidator" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":1 + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaultValidatorGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + + By("sending a request to a validating webhook path") + path = generateValidatePath(testDefaultValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + }) + + It("should scaffold a validating webhook if the type implements the Validator interface to validate deletes", func() { + By("creating a controller manager") + ctx, cancel := context.WithCancel(context.Background()) + + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{}). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"DELETE", + "object":null, + "oldObject":{ + "replica":1 + } + } +}`) + + cancel() + // TODO: we may want to improve it to make it be able to inject dependencies, + // but not always try to load certs and return not found error. 
+ err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a validating webhook path to check for failed delete") + path := generateValidatePath(testValidatorGVK) + req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + + reader = strings.NewReader(`{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"DELETE", + "object":null, + "oldObject":{ + "replica":0 + } + } +}`) + By("sending a request to a validating webhook path with correct request") + path = generateValidatePath(testValidatorGVK) + req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux.ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + }) +} + +// TestDefaulter. +var _ runtime.Object = &TestDefaulter{} + +const testDefaulterKind = "TestDefaulter" + +type TestDefaulter struct { + Replica int `json:"replica,omitempty"` + Panic bool `json:"panic,omitempty"` +} + +var testDefaulterGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: testDefaulterKind} + +func (d *TestDefaulter) GetObjectKind() schema.ObjectKind { return d } +func (d *TestDefaulter) DeepCopyObject() runtime.Object { + return &TestDefaulter{ + Replica: d.Replica, + } +} + +func (d *TestDefaulter) GroupVersionKind() schema.GroupVersionKind { + return testDefaulterGVK +} + +func (d *TestDefaulter) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestDefaulterList{} + +type TestDefaulterList struct{} + +func (*TestDefaulterList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestDefaulterList) DeepCopyObject() runtime.Object { return nil } + +func (d *TestDefaulter) Default() { + if d.Panic { + panic("injected panic") + } + if d.Replica < 2 { + d.Replica = 2 + } +} + +// TestValidator. 
+var _ runtime.Object = &TestValidator{} + +const testValidatorKind = "TestValidator" + +type TestValidator struct { + Replica int `json:"replica,omitempty"` + Panic bool `json:"panic,omitempty"` +} + +var testValidatorGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: testValidatorKind} + +func (v *TestValidator) GetObjectKind() schema.ObjectKind { return v } +func (v *TestValidator) DeepCopyObject() runtime.Object { + return &TestValidator{ + Replica: v.Replica, + } +} + +func (v *TestValidator) GroupVersionKind() schema.GroupVersionKind { + return testValidatorGVK +} + +func (v *TestValidator) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestValidatorList{} + +type TestValidatorList struct{} + +func (*TestValidatorList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestValidatorList) DeepCopyObject() runtime.Object { return nil } + +var _ admission.Validator = &TestValidator{} + +func (v *TestValidator) ValidateCreate() error { + if v.Panic { + panic("injected panic") + } + if v.Replica < 0 { + return errors.New("number of replica should be greater than or equal to 0") + } + return nil +} + +func (v *TestValidator) ValidateUpdate(old runtime.Object) error { + if v.Panic { + panic("injected panic") + } + if v.Replica < 0 { + return errors.New("number of replica should be greater than or equal to 0") + } + if oldObj, ok := old.(*TestValidator); !ok { + return fmt.Errorf("the old object is expected to be %T", oldObj) + } else if v.Replica < oldObj.Replica { + return fmt.Errorf("new replica %v should not be fewer than old replica %v", v.Replica, oldObj.Replica) + } + return nil +} + +func (v *TestValidator) ValidateDelete() error { + if v.Panic { + panic("injected panic") + } + if v.Replica > 0 { + return errors.New("number of replica should be less than or equal to 0 to delete") + } + return nil +} + +// TestDefaultValidator. 
+var _ runtime.Object = &TestDefaultValidator{} + +type TestDefaultValidator struct { + metav1.TypeMeta + metav1.ObjectMeta + + Replica int `json:"replica,omitempty"` +} + +var testDefaultValidatorGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestDefaultValidator"} + +func (dv *TestDefaultValidator) GetObjectKind() schema.ObjectKind { return dv } +func (dv *TestDefaultValidator) DeepCopyObject() runtime.Object { + return &TestDefaultValidator{ + Replica: dv.Replica, + } +} + +func (dv *TestDefaultValidator) GroupVersionKind() schema.GroupVersionKind { + return testDefaultValidatorGVK +} + +func (dv *TestDefaultValidator) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestDefaultValidatorList{} + +type TestDefaultValidatorList struct{} + +func (*TestDefaultValidatorList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestDefaultValidatorList) DeepCopyObject() runtime.Object { return nil } + +func (dv *TestDefaultValidator) Default() { + if dv.Replica < 2 { + dv.Replica = 2 + } +} + +var _ admission.Validator = &TestDefaultValidator{} + +func (dv *TestDefaultValidator) ValidateCreate() error { + if dv.Replica < 0 { + return errors.New("number of replica should be greater than or equal to 0") + } + return nil +} + +func (dv *TestDefaultValidator) ValidateUpdate(old runtime.Object) error { + if dv.Replica < 0 { + return errors.New("number of replica should be greater than or equal to 0") + } + return nil +} + +func (dv *TestDefaultValidator) ValidateDelete() error { + if dv.Replica > 0 { + return errors.New("number of replica should be less than or equal to 0 to delete") + } + return nil +} + +// TestCustomDefaulter. + +type TestCustomDefaulter struct{} + +func (*TestCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { + req, err := admission.RequestFromContext(ctx) + if err != nil { + return fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testDefaulterKind { + return fmt.Errorf("expected Kind TestDefaulter got %q", req.Kind.Kind) + } + + d := obj.(*TestDefaulter) //nolint:ifshort + if d.Replica < 2 { + d.Replica = 2 + } + return nil +} + +var _ admission.CustomDefaulter = &TestCustomDefaulter{} + +// TestCustomValidator. 
+ +type TestCustomValidator struct{} + +func (*TestCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error { + req, err := admission.RequestFromContext(ctx) + if err != nil { + return fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testValidatorKind { + return fmt.Errorf("expected Kind TestValidator got %q", req.Kind.Kind) + } + + v := obj.(*TestValidator) //nolint:ifshort + if v.Replica < 0 { + return errors.New("number of replica should be greater than or equal to 0") + } + return nil +} + +func (*TestCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error { + req, err := admission.RequestFromContext(ctx) + if err != nil { + return fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testValidatorKind { + return fmt.Errorf("expected Kind TestValidator got %q", req.Kind.Kind) + } + + v := newObj.(*TestValidator) + old := oldObj.(*TestValidator) //nolint:ifshort + if v.Replica < 0 { + return errors.New("number of replica should be greater than or equal to 0") + } + if v.Replica < old.Replica { + return fmt.Errorf("new replica %v should not be fewer than old replica %v", v.Replica, old.Replica) + } + return nil +} + +func (*TestCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error { + req, err := admission.RequestFromContext(ctx) + if err != nil { + return fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testValidatorKind { + return fmt.Errorf("expected Kind TestValidator got %q", req.Kind.Kind) + } + + v := obj.(*TestValidator) //nolint:ifshort + if v.Replica > 0 { + return errors.New("number of replica should be less than or equal to 0 to delete") + } + return nil +} + +var _ admission.CustomValidator = &TestCustomValidator{} diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go new file mode 100644 index 0000000000..3ff41ffe63 --- /dev/null +++ b/pkg/cache/cache.go @@ -0,0 +1,275 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("object-cache") + +// Cache knows how to load Kubernetes objects, fetch informers to request +// to receive events for Kubernetes objects (at a low-level), +// and add indices to fields on the objects stored in the cache. +type Cache interface { + // Cache acts as a client to objects stored in the cache. + client.Reader + + // Cache loads informers and adds field indices. 
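+	//
+	// A common pattern, sketched here for illustration only (the "spec.nodeName"
+	// index key and the surrounding variables are assumptions, not part of this API):
+	//
+	//	_ = c.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string {
+	//		return []string{o.(*corev1.Pod).Spec.NodeName}
+	//	})
+	//	_ = c.List(ctx, podList, client.MatchingFields{"spec.nodeName": "node-1"})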
+	Informers
+}
+
+// Informers knows how to create or fetch informers for different
+// group-version-kinds, and add indices to those informers. It's safe to call
+// GetInformer from multiple threads.
+type Informers interface {
+	// GetInformer fetches or constructs an informer for the given object that corresponds to a single
+	// API kind and resource.
+	GetInformer(ctx context.Context, obj client.Object) (Informer, error)
+
+	// GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead
+	// of the underlying object.
+	GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error)
+
+	// Start runs all the informers known to this cache until the context is closed.
+	// It blocks.
+	Start(ctx context.Context) error
+
+	// WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache.
+	WaitForCacheSync(ctx context.Context) bool
+
+	// Informers knows how to add indices to the caches (informers) that it manages.
+	client.FieldIndexer
+}
+
+// Informer allows you to interact with the underlying informer.
+type Informer interface {
+	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
+	// period. Events to a single handler are delivered sequentially, but there is no coordination
+	// between different handlers.
+	AddEventHandler(handler toolscache.ResourceEventHandler)
+	// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
+	// specified resync period. Events to a single handler are delivered sequentially, but there is
+	// no coordination between different handlers.
+	AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration)
+	// AddIndexers adds more indexers to this store. If you call this after you already have data
+	// in the store, the results are undefined.
+	AddIndexers(indexers toolscache.Indexers) error
+	// HasSynced returns true if the informer's underlying store has synced.
+	HasSynced() bool
+}
+
+// ObjectSelector is an alias of internal.Selector.
+type ObjectSelector internal.Selector
+
+// SelectorsByObject associates a client.Object's GVK with a field/label selector.
+// There is also `DefaultSelector` to set a global default (which will be overridden by
+// a more specific setting here, if any).
+type SelectorsByObject map[client.Object]ObjectSelector
+
+// Options are the optional arguments for creating a new InformersMap object.
+type Options struct {
+	// Scheme is the scheme to use for mapping objects to GroupVersionKinds.
+	Scheme *runtime.Scheme
+
+	// Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources.
+	Mapper meta.RESTMapper
+
+	// Resync is the base frequency at which the informers are resynced.
+	// Defaults to defaultResyncTime.
+	// A 10 percent jitter will be added to the Resync period between informers
+	// so that all informers will not send list requests simultaneously.
+	Resync *time.Duration
+
+	// Namespace restricts the cache's ListWatch to the desired namespace.
+	// Defaults to watching all namespaces.
+	Namespace string
+
+	// SelectorsByObject restricts the cache's ListWatch to the desired
+	// fields per GVK at the specified object; the map's value must implement
+	// Selector [1], for example by using a Set [2].
+	// [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector
+	// [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set
+	SelectorsByObject SelectorsByObject
+
+	// DefaultSelector will be used as the selector for all object types
+	// that do not have a selector defined in SelectorsByObject.
+	DefaultSelector ObjectSelector
+
+	// UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or
+	// list objects per GVK at the specified object.
+	// Be very careful with this: when enabled, you must DeepCopy any object before mutating it,
+	// otherwise you will mutate the object in the cache.
+	UnsafeDisableDeepCopyByObject DisableDeepCopyByObject
+
+	// TransformByObject is a map from GVKs to transformer functions which
+	// get applied when objects of those GVKs are about to be committed
+	// to the cache.
+	//
+	// The transform is called both for new objects entering the cache
+	// and for updated objects.
+	TransformByObject TransformByObject
+
+	// DefaultTransform is the transform used for all GVKs which do
+	// not have an explicit transform func set in TransformByObject.
+	DefaultTransform toolscache.TransformFunc
+}
+
+var defaultResyncTime = 10 * time.Hour
+
+// New initializes and returns a new Cache.
+func New(config *rest.Config, opts Options) (Cache, error) {
+	opts, err := defaultOpts(config, opts)
+	if err != nil {
+		return nil, err
+	}
+	selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme)
+	if err != nil {
+		return nil, err
+	}
+	disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme)
+	if err != nil {
+		return nil, err
+	}
+	transformByGVK, err := convertToTransformByKindAndGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK, transformByGVK)
+	return &informerCache{InformersMap: im}, nil
+}
+
+// BuilderWithOptions returns a Cache constructor that will build a cache
+// honoring the options argument; this is useful to specify options like
+// SelectorsByObject.
+// WARNING: If SelectorsByObject is specified, filtered out resources are not
+// returned.
+// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object
+// returned from cache get/list before mutating it.
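+//
+// A minimal sketch of wiring this into a manager (illustrative only; the Pod
+// field selector and node name are assumptions, not required by this API):
+//
+//	mgr, err := manager.New(cfg, manager.Options{
+//		NewCache: cache.BuilderWithOptions(cache.Options{
+//			SelectorsByObject: cache.SelectorsByObject{
+//				&corev1.Pod{}: {Field: fields.OneTermEqualSelector("spec.nodeName", "node-1")},
+//			},
+//		}),
+//	})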
+func BuilderWithOptions(options Options) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + if options.Scheme == nil { + options.Scheme = opts.Scheme + } + if options.Mapper == nil { + options.Mapper = opts.Mapper + } + if options.Resync == nil { + options.Resync = opts.Resync + } + if options.Namespace == "" { + options.Namespace = opts.Namespace + } + if opts.Resync == nil { + opts.Resync = options.Resync + } + + return New(config, options) + } +} + +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + // Use the default Kubernetes Scheme if unset + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + // Construct a new Mapper if unset + if opts.Mapper == nil { + var err error + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + if err != nil { + log.WithName("setup").Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config") + } + } + + // Default the resync period to 10 hours if unset + if opts.Resync == nil { + opts.Resync = &defaultResyncTime + } + return opts, nil +} + +func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, defaultSelector ObjectSelector, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) { + selectorsByGVK := internal.SelectorsByGVK{} + for object, selector := range selectorsByObject { + gvk, err := apiutil.GVKForObject(object, scheme) + if err != nil { + return nil, err + } + selectorsByGVK[gvk] = internal.Selector(selector) + } + selectorsByGVK[schema.GroupVersionKind{}] = internal.Selector(defaultSelector) + return selectorsByGVK, nil +} + +// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. +type DisableDeepCopyByObject map[client.Object]bool + +var _ client.Object = &ObjectAll{} + +// ObjectAll is the argument to represent all objects' types. +type ObjectAll struct { + client.Object +} + +func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { + disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} + for obj, disable := range disableDeepCopyByObject { + switch obj.(type) { + case ObjectAll, *ObjectAll: + disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable + default: + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK[gvk] = disable + } + } + return disableDeepCopyByGVK, nil +} + +// TransformByObject associate a client.Object's GVK to a transformer function +// to be applied when storing the object into the cache. +type TransformByObject map[client.Object]toolscache.TransformFunc + +func convertToTransformByKindAndGVK(t TransformByObject, defaultTransform toolscache.TransformFunc, scheme *runtime.Scheme) (internal.TransformFuncByObject, error) { + result := internal.NewTransformFuncByObject() + for obj, transformation := range t { + if err := result.Set(obj, scheme, transformation); err != nil { + return nil, err + } + } + result.SetDefault(defaultTransform) + return result, nil +} diff --git a/pkg/cache/cache_suite_test.go b/pkg/cache/cache_suite_test.go new file mode 100644 index 0000000000..2517777d39 --- /dev/null +++ b/pkg/cache/cache_suite_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Cache Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go new file mode 100644 index 0000000000..a84b08e94c --- /dev/null +++ b/pkg/cache/cache_test.go @@ -0,0 +1,1793 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache_test + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + kscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + kcache "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const testNodeOne = "test-node-1" +const testNamespaceOne = "test-namespace-1" +const testNamespaceTwo = "test-namespace-2" +const testNamespaceThree = "test-namespace-3" + +// TODO(community): Pull these helper functions into testenv. +// Restart policy is included to allow indexing on that field. 
+func createPodWithLabels(name, namespace string, restartPolicy corev1.RestartPolicy, labels map[string]string) client.Object { + three := int64(3) + if labels == nil { + labels = map[string]string{} + } + labels["test-label"] = name + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}, + RestartPolicy: restartPolicy, + ActiveDeadlineSeconds: &three, + }, + } + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = cl.Create(context.Background(), pod) + Expect(err).NotTo(HaveOccurred()) + return pod +} + +func createSvc(name, namespace string, cl client.Client) client.Object { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Port: 1}}, + }, + } + err := cl.Create(context.Background(), svc) + Expect(err).NotTo(HaveOccurred()) + return svc +} + +func createSA(name, namespace string, cl client.Client) client.Object { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + err := cl.Create(context.Background(), sa) + Expect(err).NotTo(HaveOccurred()) + return sa +} + +func createPod(name, namespace string, restartPolicy corev1.RestartPolicy) client.Object { + return createPodWithLabels(name, namespace, restartPolicy, nil) +} + +func deletePod(pod client.Object) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = cl.Delete(context.Background(), pod) + Expect(err).NotTo(HaveOccurred()) +} + +var _ = Describe("Informer Cache", func() { + CacheTest(cache.New, cache.Options{}) +}) +var _ = Describe("Multi-Namespace Informer Cache", func() { + CacheTest(cache.MultiNamespacedCacheBuilder([]string{testNamespaceOne, testNamespaceTwo, "default"}), cache.Options{}) +}) +var _ = Describe("Informer Cache without DeepCopy", func() { + CacheTest(cache.New, cache.Options{UnsafeDisableDeepCopyByObject: cache.DisableDeepCopyByObject{cache.ObjectAll{}: true}}) +}) + +var _ = Describe("Cache with transformers", func() { + var ( + informerCache cache.Cache + informerCacheCtx context.Context + informerCacheCancel context.CancelFunc + knownPod1 client.Object + knownPod2 client.Object + knownPod3 client.Object + knownPod4 client.Object + knownPod5 client.Object + knownPod6 client.Object + ) + + getTransformValue := func(obj client.Object) string { + accessor, err := meta.Accessor(obj) + if err == nil { + annotations := accessor.GetAnnotations() + if val, exists := annotations["transformed"]; exists { + return val + } + } + return "" + } + + BeforeEach(func() { + informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) + Expect(cfg).NotTo(BeNil()) + + By("creating three pods") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNode(testNodeOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceThree, cl) + Expect(err).NotTo(HaveOccurred()) + // Includes restart policy since these objects are indexed on this field. 
+ knownPod1 = createPod("test-pod-1", testNamespaceOne, corev1.RestartPolicyNever) + knownPod2 = createPod("test-pod-2", testNamespaceTwo, corev1.RestartPolicyAlways) + knownPod3 = createPodWithLabels("test-pod-3", testNamespaceTwo, corev1.RestartPolicyOnFailure, map[string]string{"common-label": "common"}) + knownPod4 = createPodWithLabels("test-pod-4", testNamespaceThree, corev1.RestartPolicyNever, map[string]string{"common-label": "common"}) + knownPod5 = createPod("test-pod-5", testNamespaceOne, corev1.RestartPolicyNever) + knownPod6 = createPod("test-pod-6", testNamespaceTwo, corev1.RestartPolicyAlways) + + podGVK := schema.GroupVersionKind{ + Kind: "Pod", + Version: "v1", + } + + knownPod1.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod2.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod3.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod4.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod5.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod6.GetObjectKind().SetGroupVersionKind(podGVK) + + By("creating the informer cache") + informerCache, err = cache.New(cfg, cache.Options{ + DefaultTransform: func(i interface{}) (interface{}, error) { + obj := i.(runtime.Object) + Expect(obj).NotTo(BeNil()) + + accessor, err := meta.Accessor(obj) + Expect(err).To(BeNil()) + annotations := accessor.GetAnnotations() + + if _, exists := annotations["transformed"]; exists { + // Avoid performing transformation multiple times. + return i, nil + } + + if annotations == nil { + annotations = make(map[string]string) + } + annotations["transformed"] = "default" + accessor.SetAnnotations(annotations) + return i, nil + }, + TransformByObject: cache.TransformByObject{ + &corev1.Pod{}: func(i interface{}) (interface{}, error) { + obj := i.(runtime.Object) + Expect(obj).NotTo(BeNil()) + accessor, err := meta.Accessor(obj) + Expect(err).To(BeNil()) + + annotations := accessor.GetAnnotations() + if _, exists := annotations["transformed"]; exists { + // Avoid performing transformation multiple times. 
+ return i, nil + } + + if annotations == nil { + annotations = make(map[string]string) + } + annotations["transformed"] = "explicit" + accessor.SetAnnotations(annotations) + return i, nil + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(informerCacheCtx)).To(BeTrue()) + }) + + AfterEach(func() { + By("cleaning up created pods") + deletePod(knownPod1) + deletePod(knownPod2) + deletePod(knownPod3) + deletePod(knownPod4) + deletePod(knownPod5) + deletePod(knownPod6) + + informerCacheCancel() + }) + + Context("with structured objects", func() { + It("should apply transformers to explicitly specified GVKS", func() { + By("listing pods") + out := corev1.PodList{} + Expect(informerCache.List(context.Background(), &out)).To(Succeed()) + + By("verifying that the returned pods were transformed") + for i := 0; i < len(out.Items); i++ { + Expect(getTransformValue(&out.Items[i])).To(BeIdenticalTo("explicit")) + } + }) + + It("should apply default transformer to objects when none is specified", func() { + By("getting the Kubernetes service") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service was transformed") + Expect(getTransformValue(svc)).To(BeIdenticalTo("default")) + }) + }) + + Context("with unstructured objects", func() { + It("should apply transformers to explicitly specified GVKS", func() { + By("listing pods") + out := unstructured.UnstructuredList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(informerCache.List(context.Background(), &out)).To(Succeed()) + + By("verifying that the returned pods were transformed") + for i := 0; i < len(out.Items); i++ { + Expect(getTransformValue(&out.Items[i])).To(BeIdenticalTo("explicit")) + } + }) + + It("should apply default transformer to objects when none is specified", func() { + By("getting the Kubernetes service") + svc := &unstructured.Unstructured{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service was transformed") + Expect(getTransformValue(svc)).To(BeIdenticalTo("default")) + }) + }) + + Context("with metadata-only objects", func() { + It("should apply transformers to explicitly specified GVKS", func() { + By("listing pods") + out := metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(informerCache.List(context.Background(), &out)).To(Succeed()) + + By("verifying that the returned pods were transformed") + for i := 0; i < len(out.Items); i++ { + Expect(getTransformValue(&out.Items[i])).To(BeIdenticalTo("explicit")) + } + }) + It("should apply default transformer to objects when none is specified", func() { + By("getting the Kubernetes service") + svc := &metav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) 
+ svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service was transformed") + Expect(getTransformValue(svc)).To(BeIdenticalTo("default")) + }) + }) +}) + +var _ = Describe("Cache with selectors", func() { + defer GinkgoRecover() + var ( + informerCache cache.Cache + informerCacheCtx context.Context + informerCacheCancel context.CancelFunc + ) + + BeforeEach(func() { + informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) + Expect(cfg).NotTo(BeNil()) + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + for idx, namespace := range []string{testNamespaceOne, testNamespaceTwo} { + _ = createSA("test-sa-"+strconv.Itoa(idx), namespace, cl) + _ = createSvc("test-svc-"+strconv.Itoa(idx), namespace, cl) + } + + opts := cache.Options{ + SelectorsByObject: cache.SelectorsByObject{ + &corev1.ServiceAccount{}: {Field: fields.OneTermEqualSelector("metadata.namespace", testNamespaceOne)}, + }, + DefaultSelector: cache.ObjectSelector{Field: fields.OneTermEqualSelector("metadata.namespace", testNamespaceTwo)}, + } + + By("creating the informer cache") + informerCache, err = cache.New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(informerCacheCtx)).To(BeTrue()) + }) + + AfterEach(func() { + ctx := context.Background() + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + for idx, namespace := range []string{testNamespaceOne, testNamespaceTwo} { + err = cl.Delete(ctx, &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-sa-" + strconv.Itoa(idx)}}) + Expect(err).NotTo(HaveOccurred()) + err = cl.Delete(ctx, &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-svc-" + strconv.Itoa(idx)}}) + Expect(err).NotTo(HaveOccurred()) + } + informerCacheCancel() + }) + + It("Should list serviceaccounts and find exactly one in namespace "+testNamespaceOne, func() { + var sas corev1.ServiceAccountList + err := informerCache.List(informerCacheCtx, &sas) + Expect(err).NotTo(HaveOccurred()) + Expect(len(sas.Items)).To(Equal(1)) + Expect(sas.Items[0].Namespace).To(Equal(testNamespaceOne)) + }) + + It("Should list services and find exactly one in namespace "+testNamespaceTwo, func() { + var svcs corev1.ServiceList + err := informerCache.List(informerCacheCtx, &svcs) + Expect(err).NotTo(HaveOccurred()) + Expect(len(svcs.Items)).To(Equal(1)) + Expect(svcs.Items[0].Namespace).To(Equal(testNamespaceTwo)) + }) +}) + +func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (cache.Cache, error), opts cache.Options) { + Describe("Cache test", func() { + var ( + informerCache cache.Cache + informerCacheCtx context.Context + informerCacheCancel context.CancelFunc + knownPod1 client.Object + knownPod2 client.Object + knownPod3 client.Object + knownPod4 client.Object + knownPod5 client.Object + knownPod6 client.Object + ) + + BeforeEach(func() { + informerCacheCtx, informerCacheCancel = 
context.WithCancel(context.Background()) + Expect(cfg).NotTo(BeNil()) + + By("creating three pods") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNode(testNodeOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(testNamespaceThree, cl) + Expect(err).NotTo(HaveOccurred()) + // Includes restart policy since these objects are indexed on this field. + knownPod1 = createPod("test-pod-1", testNamespaceOne, corev1.RestartPolicyNever) + knownPod2 = createPod("test-pod-2", testNamespaceTwo, corev1.RestartPolicyAlways) + knownPod3 = createPodWithLabels("test-pod-3", testNamespaceTwo, corev1.RestartPolicyOnFailure, map[string]string{"common-label": "common"}) + knownPod4 = createPodWithLabels("test-pod-4", testNamespaceThree, corev1.RestartPolicyNever, map[string]string{"common-label": "common"}) + knownPod5 = createPod("test-pod-5", testNamespaceOne, corev1.RestartPolicyNever) + knownPod6 = createPod("test-pod-6", testNamespaceTwo, corev1.RestartPolicyAlways) + + podGVK := schema.GroupVersionKind{ + Kind: "Pod", + Version: "v1", + } + + knownPod1.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod2.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod3.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod4.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod5.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod6.GetObjectKind().SetGroupVersionKind(podGVK) + + By("creating the informer cache") + informerCache, err = createCacheFunc(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(informerCacheCtx)).To(BeTrue()) + }) + + AfterEach(func() { + By("cleaning up created pods") + deletePod(knownPod1) + deletePod(knownPod2) + deletePod(knownPod3) + deletePod(knownPod4) + deletePod(knownPod5) + deletePod(knownPod6) + + informerCacheCancel() + }) + + Describe("as a Reader", func() { + Context("with structured objects", func() { + It("should be able to list objects that haven't been watched previously", func() { + By("listing all services in the cluster") + listObj := &corev1.ServiceList{} + Expect(informerCache.List(context.Background(), listObj)).To(Succeed()) + + By("verifying that the returned list contains the Kubernetes service") + // NB: kubernetes default service is automatically created in testenv. 
+ Expect(listObj.Items).NotTo(BeEmpty()) + hasKubeService := false + for i := range listObj.Items { + svc := &listObj.Items[i] + if isKubeService(svc) { + hasKubeService = true + break + } + } + Expect(hasKubeService).To(BeTrue()) + }) + + It("should be able to get objects that haven't been watched previously", func() { + By("getting the Kubernetes service") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.Name).To(Equal("kubernetes")) + Expect(svc.Namespace).To(Equal("default")) + }) + + It("should support filtering by labels in a single namespace", func() { + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := corev1.PodList{} + Expect(informerCache.List(context.Background(), &out, + client.InNamespace(testNamespaceTwo), + client.MatchingLabels(map[string]string{"test-label": "test-pod-2"}))).To(Succeed()) + + By("verifying the returned pods have the correct label") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(1)) + actual := out.Items[0] + Expect(actual.Labels["test-label"]).To(Equal("test-pod-2")) + }) + + It("should support filtering by labels from multiple namespaces", func() { + By("creating another pod with the same label but different namespace") + anotherPod := createPod("test-pod-2", testNamespaceOne, corev1.RestartPolicyAlways) + defer deletePod(anotherPod) + + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := corev1.PodList{} + labels := map[string]string{"test-label": "test-pod-2"} + Expect(informerCache.List(context.Background(), &out, client.MatchingLabels(labels))).To(Succeed()) + + By("verifying multiple pods with the same label in different namespaces are returned") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, actual := range out.Items { + Expect(actual.Labels["test-label"]).To(Equal("test-pod-2")) + } + }) + + if !isPodDisableDeepCopy(opts) { + It("should be able to list objects with GVK populated", func() { + By("listing pods") + out := &corev1.PodList{} + Expect(informerCache.List(context.Background(), out)).To(Succeed()) + + By("verifying that the returned pods have GVK populated") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(SatisfyAny(HaveLen(5), HaveLen(6))) + for _, p := range out.Items { + Expect(p.GroupVersionKind()).To(Equal(corev1.SchemeGroupVersion.WithKind("Pod"))) + } + }) + } + + It("should be able to list objects by namespace", func() { + By("listing pods in test-namespace-1") + listObj := &corev1.PodList{} + Expect(informerCache.List(context.Background(), listObj, + client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying that the returned pods are in test-namespace-1") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(2)) + for _, item := range listObj.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } + }) + + if !isPodDisableDeepCopy(opts) { + It("should deep copy the object unless told otherwise", func() { + By("retrieving a specific pod from the cache") + out := &corev1.Pod{} + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed()) + + By("verifying the retrieved pod is equal to a known pod") + Expect(out).To(Equal(knownPod2)) + + 
By("altering a field in the retrieved pod") + *out.Spec.ActiveDeadlineSeconds = 4 + + By("verifying the pods are no longer equal") + Expect(out).NotTo(Equal(knownPod2)) + }) + } else { + It("should not deep copy the object if UnsafeDisableDeepCopy is enabled", func() { + By("getting a specific pod from the cache twice") + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + out1 := &corev1.Pod{} + Expect(informerCache.Get(context.Background(), podKey, out1)).To(Succeed()) + out2 := &corev1.Pod{} + Expect(informerCache.Get(context.Background(), podKey, out2)).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(out1).To(Equal(out2)) + Expect(reflect.ValueOf(out1.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(out2.Labels).Pointer())) + + By("listing pods from the cache twice") + outList1 := &corev1.PodList{} + Expect(informerCache.List(context.Background(), outList1, client.InNamespace(testNamespaceOne))).To(Succeed()) + outList2 := &corev1.PodList{} + Expect(informerCache.List(context.Background(), outList2, client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(len(outList1.Items)).To(Equal(len(outList2.Items))) + sort.SliceStable(outList1.Items, func(i, j int) bool { return outList1.Items[i].Name <= outList1.Items[j].Name }) + sort.SliceStable(outList2.Items, func(i, j int) bool { return outList2.Items[i].Name <= outList2.Items[j].Name }) + for i := range outList1.Items { + a := &outList1.Items[i] + b := &outList2.Items[i] + Expect(a).To(Equal(b)) + Expect(reflect.ValueOf(a.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(b.Labels).Pointer())) + } + }) + } + + It("should return an error if the object is not found", func() { + By("getting a service that does not exists") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should return an error if getting object in unwatched namespace", func() { + By("getting a service that does not exists") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + }) + + It("should return an error when context is cancelled", func() { + By("cancelling the context") + informerCacheCancel() + + By("listing pods in test-namespace-1 with a cancelled context") + listObj := &corev1.PodList{} + err := informerCache.List(informerCacheCtx, listObj, client.InNamespace(testNamespaceOne)) + + By("verifying that an error is returned") + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + + It("should set the Limit option and limit number of objects to Limit when List is called", func() { + opts := &client.ListOptions{Limit: int64(3)} + By("verifying that only Limit (3) number of objects are retrieved from the cache") + listObj := &corev1.PodList{} + Expect(informerCache.List(context.Background(), listObj, opts)).To(Succeed()) + Expect(listObj.Items).Should(HaveLen(3)) + }) + + It("should return a limited result set matching the correct label", func() { + listObj := &corev1.PodList{} + labelOpt := client.MatchingLabels(map[string]string{"common-label": "common"}) + 
limitOpt := client.Limit(1) + By("verifying that only Limit (1) number of objects are retrieved from the cache") + Expect(informerCache.List(context.Background(), listObj, labelOpt, limitOpt)).To(Succeed()) + Expect(listObj.Items).Should(HaveLen(1)) + }) + }) + + Context("with unstructured objects", func() { + It("should be able to list objects that haven't been watched previously", func() { + By("listing all services in the cluster") + listObj := &unstructured.UnstructuredList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "ServiceList", + }) + err := informerCache.List(context.Background(), listObj) + Expect(err).To(Succeed()) + + By("verifying that the returned list contains the Kubernetes service") + // NB: kubernetes default service is automatically created in testenv. + Expect(listObj.Items).NotTo(BeEmpty()) + hasKubeService := false + for i := range listObj.Items { + svc := &listObj.Items[i] + if isKubeService(svc) { + hasKubeService = true + break + } + } + Expect(hasKubeService).To(BeTrue()) + }) + It("should be able to get objects that haven't been watched previously", func() { + By("getting the Kubernetes service") + svc := &unstructured.Unstructured{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.GetName()).To(Equal("kubernetes")) + Expect(svc.GetNamespace()).To(Equal("default")) + }) + + It("should support filtering by labels in a single namespace", func() { + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := unstructured.UnstructuredList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), &out, + client.InNamespace(testNamespaceTwo), + client.MatchingLabels(map[string]string{"test-label": "test-pod-2"})) + Expect(err).To(Succeed()) + + By("verifying the returned pods have the correct label") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(1)) + actual := out.Items[0] + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + }) + + It("should support filtering by labels from multiple namespaces", func() { + By("creating another pod with the same label but different namespace") + anotherPod := createPod("test-pod-2", testNamespaceOne, corev1.RestartPolicyAlways) + defer deletePod(anotherPod) + + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := unstructured.UnstructuredList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + labels := map[string]string{"test-label": "test-pod-2"} + err := informerCache.List(context.Background(), &out, client.MatchingLabels(labels)) + Expect(err).To(Succeed()) + + By("verifying multiple pods with the same label in different namespaces are returned") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, actual := range out.Items { + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + } + }) + + It("should be able to list objects by namespace", func() { + By("listing pods in test-namespace-1") + listObj := &unstructured.UnstructuredList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", 
+ Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), listObj, client.InNamespace(testNamespaceOne)) + Expect(err).To(Succeed()) + + By("verifying that the returned pods are in test-namespace-1") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(2)) + for _, item := range listObj.Items { + Expect(item.GetNamespace()).To(Equal(testNamespaceOne)) + } + }) + + It("should be able to restrict cache to a namespace", func() { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, cache.Options{Namespace: testNamespaceOne}) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(namespacedCache.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("listing pods in all namespaces") + out := &unstructured.UnstructuredList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(namespacedCache.List(context.Background(), out)).To(Succeed()) + + By("verifying the returned pod is from the watched namespace") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, item := range out.Items { + Expect(item.GetNamespace()).To(Equal(testNamespaceOne)) + } + By("listing all nodes - should still be able to list a cluster-scoped resource") + nodeList := &unstructured.UnstructuredList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(namespacedCache.List(context.Background(), nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + + By("getting a node - should still be able to get a cluster-scoped resource") + node := &unstructured.Unstructured{} + node.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Node", + }) + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeOne} + Expect(namespacedCache.Get(context.Background(), key1, node)).To(Succeed()) + + By("verifying that the namespace is ignored when getting a cluster-scoped resource") + key2 := client.ObjectKey{Namespace: "random", Name: testNodeOne} + Expect(namespacedCache.Get(context.Background(), key2, node)).To(Succeed()) + }) + + if !isPodDisableDeepCopy(opts) { + It("should deep copy the object unless told otherwise", func() { + By("retrieving a specific pod from the cache") + out := &unstructured.Unstructured{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + uKnownPod2 := &unstructured.Unstructured{} + Expect(kscheme.Scheme.Convert(knownPod2, uKnownPod2, nil)).To(Succeed()) + + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed()) + + By("verifying the retrieved pod is equal to a known pod") + Expect(out).To(Equal(uKnownPod2)) + + By("altering a field in the retrieved pod") + m, _ := out.Object["spec"].(map[string]interface{}) + m["activeDeadlineSeconds"] = 4 + + By("verifying the pods are no longer equal") + Expect(out).NotTo(Equal(knownPod2)) + }) + } else { + It("should not deep copy the object if UnsafeDisableDeepCopy is enabled", func() { + By("getting a specific pod from the cache twice") + podKey := client.ObjectKey{Name: 
"test-pod-2", Namespace: testNamespaceTwo} + out1 := &unstructured.Unstructured{} + out1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(context.Background(), podKey, out1)).To(Succeed()) + out2 := &unstructured.Unstructured{} + out2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(context.Background(), podKey, out2)).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(out1).To(Equal(out2)) + Expect(reflect.ValueOf(out1.Object).Pointer()).To(BeIdenticalTo(reflect.ValueOf(out2.Object).Pointer())) + + By("listing pods from the cache twice") + outList1 := &unstructured.UnstructuredList{} + outList1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(context.Background(), outList1, client.InNamespace(testNamespaceOne))).To(Succeed()) + outList2 := &unstructured.UnstructuredList{} + outList2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(context.Background(), outList2, client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(len(outList1.Items)).To(Equal(len(outList2.Items))) + sort.SliceStable(outList1.Items, func(i, j int) bool { return outList1.Items[i].GetName() <= outList1.Items[j].GetName() }) + sort.SliceStable(outList2.Items, func(i, j int) bool { return outList2.Items[i].GetName() <= outList2.Items[j].GetName() }) + for i := range outList1.Items { + a := &outList1.Items[i] + b := &outList2.Items[i] + Expect(a).To(Equal(b)) + Expect(reflect.ValueOf(a.Object).Pointer()).To(BeIdenticalTo(reflect.ValueOf(b.Object).Pointer())) + } + }) + } + + It("should return an error if the object is not found", func() { + By("getting a service that does not exists") + svc := &unstructured.Unstructured{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + It("should return an error if getting object in unwatched namespace", func() { + By("getting a service that does not exists") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + }) + It("test multinamespaced cache for cluster scoped resources", func() { + By("creating a multinamespaced cache to watch specific namespaces") + multi := cache.MultiNamespacedCacheBuilder([]string{"default", testNamespaceOne}) + m, err := multi(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting it for sync") + go func() { + defer GinkgoRecover() + Expect(m.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(m.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("should be able to fetch cluster scoped resource") + node := &corev1.Node{} + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeOne} + Expect(m.Get(context.Background(), key1, node)).To(Succeed()) + + By("verifying if the 
cluster scoped resources are not duplicated") + nodeList := &unstructured.UnstructuredList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(m.List(context.Background(), nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + Expect(len(nodeList.Items)).To(BeEquivalentTo(1)) + }) + }) + Context("with metadata-only objects", func() { + It("should be able to list objects that haven't been watched previously", func() { + By("listing all services in the cluster") + listObj := &metav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "ServiceList", + }) + err := informerCache.List(context.Background(), listObj) + Expect(err).To(Succeed()) + + By("verifying that the returned list contains the Kubernetes service") + // NB: kubernetes default service is automatically created in testenv. + Expect(listObj.Items).NotTo(BeEmpty()) + hasKubeService := false + for i := range listObj.Items { + svc := &listObj.Items[i] + if isKubeService(svc) { + hasKubeService = true + break + } + } + Expect(hasKubeService).To(BeTrue()) + }) + It("should be able to get objects that haven't been watched previously", func() { + By("getting the Kubernetes service") + svc := &metav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.GetName()).To(Equal("kubernetes")) + Expect(svc.GetNamespace()).To(Equal("default")) + }) + + It("should support filtering by labels in a single namespace", func() { + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), &out, + client.InNamespace(testNamespaceTwo), + client.MatchingLabels(map[string]string{"test-label": "test-pod-2"})) + Expect(err).To(Succeed()) + + By("verifying the returned pods have the correct label") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(1)) + actual := out.Items[0] + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + }) + + It("should support filtering by labels from multiple namespaces", func() { + By("creating another pod with the same label but different namespace") + anotherPod := createPod("test-pod-2", testNamespaceOne, corev1.RestartPolicyAlways) + defer deletePod(anotherPod) + + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + labels := map[string]string{"test-label": "test-pod-2"} + err := informerCache.List(context.Background(), &out, client.MatchingLabels(labels)) + Expect(err).To(Succeed()) + + By("verifying multiple pods with the same label in different namespaces are returned") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, actual := range out.Items { + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + } + }) + + It("should be able to list objects by 
namespace", func() { + By("listing pods in test-namespace-1") + listObj := &metav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), listObj, client.InNamespace(testNamespaceOne)) + Expect(err).To(Succeed()) + + By("verifying that the returned pods are in test-namespace-1") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(2)) + for _, item := range listObj.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } + }) + + It("should be able to restrict cache to a namespace", func() { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, cache.Options{Namespace: testNamespaceOne}) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(namespacedCache.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("listing pods in all namespaces") + out := &metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(namespacedCache.List(context.Background(), out)).To(Succeed()) + + By("verifying the returned pod is from the watched namespace") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, item := range out.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } + By("listing all nodes - should still be able to list a cluster-scoped resource") + nodeList := &metav1.PartialObjectMetadataList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(namespacedCache.List(context.Background(), nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + + By("getting a node - should still be able to get a cluster-scoped resource") + node := &metav1.PartialObjectMetadata{} + node.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Node", + }) + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeOne} + Expect(namespacedCache.Get(context.Background(), key1, node)).To(Succeed()) + + By("verifying that the namespace is ignored when getting a cluster-scoped resource") + key2 := client.ObjectKey{Namespace: "random", Name: testNodeOne} + Expect(namespacedCache.Get(context.Background(), key2, node)).To(Succeed()) + }) + + if !isPodDisableDeepCopy(opts) { + It("should deep copy the object unless told otherwise", func() { + By("retrieving a specific pod from the cache") + out := &metav1.PartialObjectMetadata{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + uKnownPod2 := &metav1.PartialObjectMetadata{} + knownPod2.(*corev1.Pod).ObjectMeta.DeepCopyInto(&uKnownPod2.ObjectMeta) + uKnownPod2.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed()) + + By("verifying the retrieved pod is equal to a known pod") + Expect(out).To(Equal(uKnownPod2)) + + By("altering a field in the retrieved pod") + out.Labels["foo"] = "bar" + + By("verifying the pods are no longer equal") + 
Expect(out).NotTo(Equal(knownPod2)) + }) + } else { + It("should not deep copy the object if UnsafeDisableDeepCopy is enabled", func() { + By("getting a specific pod from the cache twice") + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + out1 := &metav1.PartialObjectMetadata{} + out1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(context.Background(), podKey, out1)).To(Succeed()) + out2 := &metav1.PartialObjectMetadata{} + out2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(context.Background(), podKey, out2)).To(Succeed()) + + By("verifying the pods have the same pointer addresses") + By("verifying the pointer fields in pod have the same addresses") + Expect(out1).To(Equal(out2)) + Expect(reflect.ValueOf(out1.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(out2.Labels).Pointer())) + + By("listing pods from the cache twice") + outList1 := &metav1.PartialObjectMetadataList{} + outList1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(context.Background(), outList1, client.InNamespace(testNamespaceOne))).To(Succeed()) + outList2 := &metav1.PartialObjectMetadataList{} + outList2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(context.Background(), outList2, client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(len(outList1.Items)).To(Equal(len(outList2.Items))) + sort.SliceStable(outList1.Items, func(i, j int) bool { return outList1.Items[i].Name <= outList1.Items[j].Name }) + sort.SliceStable(outList2.Items, func(i, j int) bool { return outList2.Items[i].Name <= outList2.Items[j].Name }) + for i := range outList1.Items { + a := &outList1.Items[i] + b := &outList2.Items[i] + Expect(a).To(Equal(b)) + Expect(reflect.ValueOf(a.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(b.Labels).Pointer())) + } + }) + } + + It("should return an error if the object is not found", func() { + By("getting a service that does not exists") + svc := &metav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + It("should return an error if getting object in unwatched namespace", func() { + By("getting a service that does not exists") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + }) + }) + type selectorsTestCase struct { + fieldSelectors map[string]string + labelSelectors map[string]string + expectedPods []string + } + DescribeTable(" and cache with selectors", func(tc selectorsTestCase) { + By("creating the cache") + builder := cache.BuilderWithOptions( + cache.Options{ + SelectorsByObject: cache.SelectorsByObject{ + &corev1.Pod{}: { + Label: labels.Set(tc.labelSelectors).AsSelector(), + Field: fields.Set(tc.fieldSelectors).AsSelector(), + }, + }, + }, + ) + informer, err := builder(cfg, cache.Options{}) + 
Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("Checking with structured") + obtainedStructuredPodList := corev1.PodList{} + Expect(informer.List(context.Background(), &obtainedStructuredPodList)).To(Succeed()) + Expect(obtainedStructuredPodList.Items).Should(WithTransform(func(pods []corev1.Pod) []string { + obtainedPodNames := []string{} + for _, pod := range pods { + obtainedPodNames = append(obtainedPodNames, pod.Name) + } + return obtainedPodNames + }, ConsistOf(tc.expectedPods))) + + By("Checking with unstructured") + obtainedUnstructuredPodList := unstructured.UnstructuredList{} + obtainedUnstructuredPodList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(context.Background(), &obtainedUnstructuredPodList) + Expect(err).To(Succeed()) + Expect(obtainedUnstructuredPodList.Items).Should(WithTransform(func(pods []unstructured.Unstructured) []string { + obtainedPodNames := []string{} + for _, pod := range pods { + obtainedPodNames = append(obtainedPodNames, pod.GetName()) + } + return obtainedPodNames + }, ConsistOf(tc.expectedPods))) + + By("Checking with metadata") + obtainedMetadataPodList := metav1.PartialObjectMetadataList{} + obtainedMetadataPodList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(context.Background(), &obtainedMetadataPodList) + Expect(err).To(Succeed()) + Expect(obtainedMetadataPodList.Items).Should(WithTransform(func(pods []metav1.PartialObjectMetadata) []string { + obtainedPodNames := []string{} + for _, pod := range pods { + obtainedPodNames = append(obtainedPodNames, pod.Name) + } + return obtainedPodNames + }, ConsistOf(tc.expectedPods))) + }, + Entry("when selectors are empty it has to inform about all the pods", selectorsTestCase{ + fieldSelectors: map[string]string{}, + labelSelectors: map[string]string{}, + expectedPods: []string{"test-pod-1", "test-pod-2", "test-pod-3", "test-pod-4", "test-pod-5", "test-pod-6"}, + }), + Entry("when field matches one pod it has to inform about it", selectorsTestCase{ + fieldSelectors: map[string]string{"metadata.name": "test-pod-2"}, + expectedPods: []string{"test-pod-2"}, + }), + Entry("when field matches multiple pods it has to inform about all of them", selectorsTestCase{ + fieldSelectors: map[string]string{"metadata.namespace": testNamespaceTwo}, + expectedPods: []string{"test-pod-2", "test-pod-3", "test-pod-6"}, + }), + Entry("when label matches one pod it has to inform about it", selectorsTestCase{ + labelSelectors: map[string]string{"test-label": "test-pod-4"}, + expectedPods: []string{"test-pod-4"}, + }), + Entry("when label matches multiple pods it has to inform about all of them", selectorsTestCase{ + labelSelectors: map[string]string{"common-label": "common"}, + expectedPods: []string{"test-pod-3", "test-pod-4"}, + }), + Entry("when label and field matches one pod it has to inform about about it", selectorsTestCase{ + labelSelectors: map[string]string{"common-label": "common"}, + fieldSelectors: map[string]string{"metadata.namespace": testNamespaceTwo}, + expectedPods: []string{"test-pod-3"}, + }), + Entry("when label does not match it does not has to inform", selectorsTestCase{ + labelSelectors: map[string]string{"new-label": "new"}, + expectedPods: 
[]string{}, + }), + Entry("when field does not match it does not has to inform", selectorsTestCase{ + fieldSelectors: map[string]string{"metadata.namespace": "new"}, + expectedPods: []string{}, + }), + ) + }) + Describe("as an Informer", func() { + Context("with structured objects", func() { + It("should be able to get informer for the object", func() { + By("getting a shared index informer for a pod") + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + sii, err := informerCache.GetInformer(context.TODO(), pod) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl.Create(context.Background(), pod)).To(Succeed()) + defer deletePod(pod) + + By("verifying the object is received on the channel") + Eventually(out).Should(Receive(Equal(pod))) + }) + It("should be able to get an informer by group/version/kind", func() { + By("getting an shared index informer for gvk = core/v1/pod") + gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + sii, err := informerCache.GetInformerForKind(context.TODO(), gvk) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-gvk", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + Expect(cl.Create(context.Background(), pod)).To(Succeed()) + defer deletePod(pod) + + By("verifying the object is received on the channel") + Eventually(out).Should(Receive(Equal(pod))) + }) + It("should be able to index an object field then retrieve objects by that field", func() { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the restartPolicy field of the Pod object before starting") + pod := &corev1.Pod{} + indexFunc := func(obj client.Object) []string { + return []string{string(obj.(*corev1.Pod).Spec.RestartPolicy)} + } + Expect(informer.IndexField(context.TODO(), pod, "spec.restartPolicy", indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("listing Pods with restartPolicyOnFailure") + listObj := &corev1.PodList{} + Expect(informer.List(context.Background(), listObj, + client.MatchingFields{"spec.restartPolicy": "OnFailure"})).To(Succeed()) + By("verifying that the returned pods have correct restart policy") 
+ Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.Name).To(Equal("test-pod-3")) + }) + + It("should allow for get informer to be cancelled", func() { + By("creating a context and cancelling it") + informerCacheCancel() + + By("getting a shared index informer for a pod with a cancelled context") + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + sii, err := informerCache.GetInformer(informerCacheCtx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + + It("should allow getting an informer by group/version/kind to be cancelled", func() { + By("creating a context and cancelling it") + informerCacheCancel() + + By("getting an shared index informer for gvk = core/v1/pod with a cancelled context") + gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + sii, err := informerCache.GetInformerForKind(informerCacheCtx, gvk) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + + It("should be able not to change indexer values after indexing cluster-scope objects", func() { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the Namespace objects with fixed values before starting") + pod := &corev1.Namespace{} + indexerValues := []string{"a", "b", "c"} + fieldName := "fixedValues" + indexFunc := func(obj client.Object) []string { + return indexerValues + } + Expect(informer.IndexField(context.TODO(), pod, fieldName, indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("listing Namespaces with fixed indexer") + listObj := &corev1.NamespaceList{} + Expect(informer.List(context.Background(), listObj, + client.MatchingFields{fieldName: "a"})).To(Succeed()) + Expect(listObj.Items).NotTo(BeZero()) + + By("verifying the indexing does not change fixed returned values") + Expect(indexerValues).Should(HaveLen(3)) + Expect(indexerValues[0]).To(Equal("a")) + Expect(indexerValues[1]).To(Equal("b")) + Expect(indexerValues[2]).To(Equal("c")) + }) + }) + Context("with unstructured objects", func() { + It("should be able to get informer for the object", func() { + By("getting a shared index informer for a pod") + + pod := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "nginx", + "image": "nginx", + }, + }, + }, + }, + } + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(context.TODO(), pod) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := 
client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl.Create(context.Background(), pod)).To(Succeed()) + defer deletePod(pod) + + By("verifying the object is received on the channel") + Eventually(out).Should(Receive(Equal(pod))) + }, 3) + + It("should be able to index an object field then retrieve objects by that field", func() { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the restartPolicy field of the Pod object before starting") + pod := &unstructured.Unstructured{} + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + indexFunc := func(obj client.Object) []string { + s, ok := obj.(*unstructured.Unstructured).Object["spec"] + if !ok { + return []string{} + } + m, ok := s.(map[string]interface{}) + if !ok { + return []string{} + } + return []string{fmt.Sprintf("%v", m["restartPolicy"])} + } + Expect(informer.IndexField(context.TODO(), pod, "spec.restartPolicy", indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("listing Pods with restartPolicyOnFailure") + listObj := &unstructured.UnstructuredList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(context.Background(), listObj, + client.MatchingFields{"spec.restartPolicy": "OnFailure"}) + Expect(err).To(Succeed()) + + By("verifying that the returned pods have correct restart policy") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.GetName()).To(Equal("test-pod-3")) + }, 3) + + It("should allow for get informer to be cancelled", func() { + By("cancelling the context") + informerCacheCancel() + + By("getting a shared index informer for a pod with a cancelled context") + pod := &unstructured.Unstructured{} + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(informerCacheCtx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + }) + Context("with metadata-only objects", func() { + It("should be able to get informer for the object", func() { + By("getting a shared index informer for a pod") + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + + podMeta := &metav1.PartialObjectMetadata{} + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + podMeta.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + sii, err := informerCache.GetInformer(context.TODO(), podMeta) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := client.New(cfg, 
client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl.Create(context.Background(), pod)).To(Succeed()) + defer deletePod(pod) + // re-copy the result in so that we can match on it properly + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + + By("verifying the object's metadata is received on the channel") + Eventually(out).Should(Receive(Equal(podMeta))) + }, 3) + + It("should be able to index an object field then retrieve objects by that field", func() { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the restartPolicy field of the Pod object before starting") + pod := &metav1.PartialObjectMetadata{} + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + indexFunc := func(obj client.Object) []string { + metadata := obj.(*metav1.PartialObjectMetadata) + return []string{metadata.Labels["test-label"]} + } + Expect(informer.IndexField(context.TODO(), pod, "metadata.labels.test-label", indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(informerCacheCtx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(informerCacheCtx)).NotTo(BeFalse()) + + By("listing Pods with restartPolicyOnFailure") + listObj := &metav1.PartialObjectMetadataList{} + gvk := schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + } + listObj.SetGroupVersionKind(gvk) + err = informer.List(context.Background(), listObj, + client.MatchingFields{"metadata.labels.test-label": "test-pod-3"}) + Expect(err).To(Succeed()) + + By("verifying that the GVK has been preserved for the list object") + Expect(listObj.GroupVersionKind()).To(Equal(gvk)) + + By("verifying that the returned pods have correct restart policy") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.GetName()).To(Equal("test-pod-3")) + + By("verifying that the GVK has been preserved for the item in the list") + Expect(actual.GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + })) + }, 3) + + It("should allow for get informer to be cancelled", func() { + By("creating a context and cancelling it") + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + By("getting a shared index informer for a pod with a cancelled context") + pod := &metav1.PartialObjectMetadata{} + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + }) + }) + }) +} + +// ensureNamespace installs namespace of a given name if not exists. 
+func ensureNamespace(namespace string, client client.Client) error { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + } + err := client.Create(context.TODO(), &ns) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +func ensureNode(name string, client client.Client) error { + node := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + } + err := client.Create(context.TODO(), &node) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +//nolint:interfacer +func isKubeService(svc metav1.Object) bool { + // grumble grumble linters grumble grumble + return svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" +} + +func isPodDisableDeepCopy(opts cache.Options) bool { + if d, ok := opts.UnsafeDisableDeepCopyByObject[&corev1.Pod{}]; ok { + return d + } else if d, ok = opts.UnsafeDisableDeepCopyByObject[cache.ObjectAll{}]; ok { + return d + } else if d, ok = opts.UnsafeDisableDeepCopyByObject[&cache.ObjectAll{}]; ok { + return d + } + return false +} diff --git a/pkg/cache/doc.go b/pkg/cache/doc.go new file mode 100644 index 0000000000..e1742ac0f3 --- /dev/null +++ b/pkg/cache/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides object caches that act as caching client.Reader +// instances and help drive Kubernetes-object-based event handlers. +package cache diff --git a/pkg/cache/informer_cache.go b/pkg/cache/informer_cache.go new file mode 100644 index 0000000000..08e4e6df59 --- /dev/null +++ b/pkg/cache/informer_cache.go @@ -0,0 +1,217 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "reflect" + "strings" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +var ( + _ Informers = &informerCache{} + _ client.Reader = &informerCache{} + _ Cache = &informerCache{} +) + +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. 
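// A minimal sketch of handling this error from the calling side (illustrative
// only; c, ctx and the surrounding variables are assumed, not part of this change):
//
//	var pod corev1.Pod
//	if err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "web"}, &pod); err != nil {
//		var notStarted *ErrCacheNotStarted
//		if errors.As(err, &notStarted) {
//			// the cache has not been started yet; start it (e.g. via the manager) and retry
//		}
//	}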
+type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + +// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +type informerCache struct { + *internal.InformersMap +} + +// Get implements Reader. +func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ip.Scheme) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.Get(ctx, key, out) +} + +// List implements Reader. +func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + + return cache.Reader.List(ctx, out, opts...) +} + +// objectTypeForListObject tries to find the runtime.Object and associated GVK +// for a single object corresponding to the passed-in list type. We need them +// because they are used as cache map key. +func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ip.Scheme) + if err != nil { + return nil, nil, err + } + + // we need the non-list GVK, so chop off the "List" from the end of the kind + if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + _, isUnstructured := list.(*unstructured.UnstructuredList) + var cacheTypeObj runtime.Object + if isUnstructured { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + cacheTypeObj = u + } else { + itemsPtr, err := apimeta.GetItemsPtr(list) + if err != nil { + return nil, nil, err + } + // http://knowyourmeme.com/memes/this-is-fine + elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() + if elemType.Kind() != reflect.Ptr { + elemType = reflect.PtrTo(elemType) + } + + cacheTypeValue := reflect.Zero(elemType) + var ok bool + cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) + if !ok { + return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) + } + } + + return &gvk, cacheTypeObj, nil +} + +// GetInformerForKind returns the informer for the GroupVersionKind. +func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + // Map the gvk to an object + obj, err := ip.Scheme.New(gvk) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// GetInformer returns the informer for the obj. +func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ip.Scheme) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock. 
+func (ip *informerCache) NeedLeaderElection() bool { + return false +} + +// IndexField adds an indexer to the underlying cache, using extraction function to get +// value(s) from the given field. This index can then be used by passing a field selector +// to List. For one-to-one compatibility with "normal" field selectors, only return one value. +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. +func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ip.GetInformer(ctx, obj) + if err != nil { + return err + } + return indexByField(informer, field, extractValue) +} + +func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { + indexFunc := func(objRaw interface{}) ([]string, error) { + // TODO(directxman12): check if this is the correct type? + obj, isObj := objRaw.(client.Object) + if !isObj { + return nil, fmt.Errorf("object of type %T is not an Object", objRaw) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + ns := meta.GetNamespace() + + rawVals := extractor(obj) + var vals []string + if ns == "" { + // if we're not doubling the keys for the namespaced case, just create a new slice with same length + vals = make([]string, len(rawVals)) + } else { + // if we need to add non-namespaced versions too, double the length + vals = make([]string, len(rawVals)*2) + } + for i, rawVal := range rawVals { + // save a namespaced variant, so that we can ask + // "what are all the object matching a given index *in a given namespace*" + vals[i] = internal.KeyToNamespacedKey(ns, rawVal) + if ns != "" { + // if we have a namespace, also inject a special index key for listing + // regardless of the object namespace + vals[i+len(rawVals)] = internal.KeyToNamespacedKey("", rawVal) + } + } + + return vals, nil + } + + return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) +} diff --git a/pkg/cache/informer_cache_test.go b/pkg/cache/informer_cache_test.go new file mode 100644 index 0000000000..6a19eaa366 --- /dev/null +++ b/pkg/cache/informer_cache_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var _ = Describe("informerCache", func() { + It("should not require LeaderElection", func() { + cfg := &rest.Config{} + + mapper, err := apiutil.NewDynamicRESTMapper(cfg, apiutil.WithLazyDiscovery) + Expect(err).ToNot(HaveOccurred()) + + c, err := cache.New(cfg, cache.Options{Mapper: mapper}) + Expect(err).ToNot(HaveOccurred()) + + leaderElectionRunnable, ok := c.(manager.LeaderElectionRunnable) + Expect(ok).To(BeTrue()) + Expect(leaderElectionRunnable.NeedLeaderElection()).To(BeFalse()) + }) +}) diff --git a/pkg/cache/informer_cache_unit_test.go b/pkg/cache/informer_cache_unit_test.go new file mode 100644 index 0000000000..6f66e4bd89 --- /dev/null +++ b/pkg/cache/informer_cache_unit_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + crscheme "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const ( + itemPointerSliceTypeGroupName = "jakob.fabian" + itemPointerSliceTypeVersion = "v1" +) + +var _ = Describe("ip.objectTypeForListObject", func() { + ip := &informerCache{ + InformersMap: &internal.InformersMap{Scheme: scheme.Scheme}, + } + + It("should find the object type for unstructured lists", func() { + unstructuredList := &unstructured.UnstructuredList{} + unstructuredList.SetAPIVersion("v1") + unstructuredList.SetKind("PodList") + + gvk, obj, err := ip.objectTypeForListObject(unstructuredList) + Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal("")) + Expect(gvk.Version).To(Equal("v1")) + Expect(gvk.Kind).To(Equal("Pod")) + referenceUnstructured := &unstructured.Unstructured{} + referenceUnstructured.SetGroupVersionKind(*gvk) + Expect(obj).To(Equal(referenceUnstructured)) + + }) + + It("should find the object type of a list with a slice of literals items field", func() { + gvk, obj, err := ip.objectTypeForListObject(&corev1.PodList{}) + Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal("")) + Expect(gvk.Version).To(Equal("v1")) + Expect(gvk.Kind).To(Equal("Pod")) + var referencePod *corev1.Pod + Expect(obj).To(Equal(referencePod)) + + }) + + It("should find the object type of a list with a slice of pointers items field", func() { + By("registering the type", func() { + ip.Scheme = runtime.NewScheme() + err := (&crscheme.Builder{ + GroupVersion: schema.GroupVersion{Group: itemPointerSliceTypeGroupName, Version: itemPointerSliceTypeVersion}, + }). 
+ Register( + &controllertest.UnconventionalListType{}, + &controllertest.UnconventionalListTypeList{}, + ).AddToScheme(ip.Scheme) + Expect(err).To(BeNil()) + }) + + By("calling objectTypeForListObject", func() { + gvk, obj, err := ip.objectTypeForListObject(&controllertest.UnconventionalListTypeList{}) + Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal(itemPointerSliceTypeGroupName)) + Expect(gvk.Version).To(Equal(itemPointerSliceTypeVersion)) + Expect(gvk.Kind).To(Equal("UnconventionalListType")) + var referenceObject *controllertest.UnconventionalListType + Expect(obj).To(Equal(referenceObject)) + }) + }) +}) diff --git a/pkg/cache/informertest/fake_cache.go b/pkg/cache/informertest/fake_cache.go new file mode 100644 index 0000000000..da3bf8e0d4 --- /dev/null +++ b/pkg/cache/informertest/fake_cache.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package informertest + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" +) + +var _ cache.Cache = &FakeInformers{} + +// FakeInformers is a fake implementation of Informers. +type FakeInformers struct { + InformersByGVK map[schema.GroupVersionKind]toolscache.SharedIndexInformer + Scheme *runtime.Scheme + Error error + Synced *bool +} + +// GetInformerForKind implements Informers. +func (c *FakeInformers) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (cache.Informer, error) { + if c.Scheme == nil { + c.Scheme = scheme.Scheme + } + obj, err := c.Scheme.New(gvk) + if err != nil { + return nil, err + } + return c.informerFor(gvk, obj) +} + +// FakeInformerForKind implements Informers. +func (c *FakeInformers) FakeInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (*controllertest.FakeInformer, error) { + if c.Scheme == nil { + c.Scheme = scheme.Scheme + } + obj, err := c.Scheme.New(gvk) + if err != nil { + return nil, err + } + i, err := c.informerFor(gvk, obj) + if err != nil { + return nil, err + } + return i.(*controllertest.FakeInformer), nil +} + +// GetInformer implements Informers. +func (c *FakeInformers) GetInformer(ctx context.Context, obj client.Object) (cache.Informer, error) { + if c.Scheme == nil { + c.Scheme = scheme.Scheme + } + gvks, _, err := c.Scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + return c.informerFor(gvk, obj) +} + +// WaitForCacheSync implements Informers. +func (c *FakeInformers) WaitForCacheSync(ctx context.Context) bool { + if c.Synced == nil { + return true + } + return *c.Synced +} + +// FakeInformerFor implements Informers. 
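// A minimal test sketch (illustrative; the pod literal and the event wiring are
// assumed, not part of this change):
//
//	informers := &informertest.FakeInformers{}
//	fi, err := informers.FakeInformerFor(&corev1.Pod{})
//	// fi can then drive handlers registered against the cache in tests,
//	// e.g. fi.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}).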
+func (c *FakeInformers) FakeInformerFor(obj runtime.Object) (*controllertest.FakeInformer, error) { + if c.Scheme == nil { + c.Scheme = scheme.Scheme + } + gvks, _, err := c.Scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + i, err := c.informerFor(gvk, obj) + if err != nil { + return nil, err + } + return i.(*controllertest.FakeInformer), nil +} + +func (c *FakeInformers) informerFor(gvk schema.GroupVersionKind, _ runtime.Object) (toolscache.SharedIndexInformer, error) { + if c.Error != nil { + return nil, c.Error + } + if c.InformersByGVK == nil { + c.InformersByGVK = map[schema.GroupVersionKind]toolscache.SharedIndexInformer{} + } + informer, ok := c.InformersByGVK[gvk] + if ok { + return informer, nil + } + + c.InformersByGVK[gvk] = &controllertest.FakeInformer{} + return c.InformersByGVK[gvk], nil +} + +// Start implements Informers. +func (c *FakeInformers) Start(ctx context.Context) error { + return c.Error +} + +// IndexField implements Cache. +func (c *FakeInformers) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + return nil +} + +// Get implements Cache. +func (c *FakeInformers) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return nil +} + +// List implements Cache. +func (c *FakeInformers) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return nil +} diff --git a/pkg/cache/internal/cache_reader.go b/pkg/cache/internal/cache_reader.go new file mode 100644 index 0000000000..9c2255123c --- /dev/null +++ b/pkg/cache/internal/cache_reader.go @@ -0,0 +1,218 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CacheReader is a client.Reader. +var _ client.Reader = &CacheReader{} + +// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type. +type CacheReader struct { + // indexer is the underlying indexer wrapped by this cache. + indexer cache.Indexer + + // groupVersionKind is the group-version-kind of the resource. + groupVersionKind schema.GroupVersionKind + + // scopeName is the scope of the resource (namespaced or cluster-scoped). + scopeName apimeta.RESTScopeName + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + disableDeepCopy bool +} + +// Get checks the indexer for the object and writes a copy of it if found. 
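// A hedged illustration of the lookup: for namespaced resources the store key
// is "<namespace>/<name>" (matching MetaNamespaceKeyFunc), so a call such as
//
//	err := reader.Get(ctx, client.ObjectKey{Namespace: "default", Name: "web"}, pod)
//
// resolves the indexer key "default/web" and returns a NotFound API error when
// the object is absent from the cache (reader, ctx and pod are assumed here).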
+func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + if c.scopeName == apimeta.RESTScopeNameRoot { + key.Namespace = "" + } + storeKey := objectKeyToStoreKey(key) + + // Lookup the object from the indexer cache + obj, exists, err := c.indexer.GetByKey(storeKey) + if err != nil { + return err + } + + // Not found, return an error + if !exists { + // Resource gets transformed into Kind in the error anyway, so this is fine + return apierrors.NewNotFound(schema.GroupResource{ + Group: c.groupVersionKind.Group, + Resource: c.groupVersionKind.Kind, + }, key.Name) + } + + // Verify the result is a runtime.Object + if _, isObj := obj.(runtime.Object); !isObj { + // This should never happen + return fmt.Errorf("cache contained %T, which is not an Object", obj) + } + + if c.disableDeepCopy { + // skip deep copy which might be unsafe + // you must DeepCopy any object before mutating it outside + } else { + // deep copy to avoid mutating cache + obj = obj.(runtime.Object).DeepCopyObject() + } + + // Copy the value of the item in the cache to the returned value + // TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto) + outVal := reflect.ValueOf(out) + objVal := reflect.ValueOf(obj) + if !objVal.Type().AssignableTo(outVal.Type()) { + return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type()) + } + reflect.Indirect(outVal).Set(reflect.Indirect(objVal)) + if !c.disableDeepCopy { + out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + } + + return nil +} + +// List lists items out of the indexer and writes them to out. +func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error { + var objs []interface{} + var err error + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + switch { + case listOpts.FieldSelector != nil: + // TODO(directxman12): support more complicated field selectors by + // combining multiple indices, GetIndexers, etc + field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) + if !requiresExact { + return fmt.Errorf("non-exact field matches are not supported by the cache") + } + // list all objects by the field selector. If this is namespaced and we have one, ask for the + // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" + // namespace. + objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) + case listOpts.Namespace != "": + objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) + default: + objs = c.indexer.List() + } + if err != nil { + return err + } + var labelSel labels.Selector + if listOpts.LabelSelector != nil { + labelSel = listOpts.LabelSelector + } + + limitSet := listOpts.Limit > 0 + + runtimeObjs := make([]runtime.Object, 0, len(objs)) + for _, item := range objs { + // if the Limit option is set and the number of items + // listed exceeds this limit, then stop reading. 
+ if limitSet && int64(len(runtimeObjs)) >= listOpts.Limit { + break + } + obj, isObj := item.(runtime.Object) + if !isObj { + return fmt.Errorf("cache contained %T, which is not an Object", obj) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + + var outObj runtime.Object + if c.disableDeepCopy { + // skip deep copy which might be unsafe + // you must DeepCopy any object before mutating it outside + outObj = obj + } else { + outObj = obj.DeepCopyObject() + outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + } + runtimeObjs = append(runtimeObjs, outObj) + } + return apimeta.SetList(out, runtimeObjs) +} + +// objectKeyToStorageKey converts an object key to store key. +// It's akin to MetaNamespaceKeyFunc. It's separate from +// String to allow keeping the key format easily in sync with +// MetaNamespaceKeyFunc. +func objectKeyToStoreKey(k client.ObjectKey) string { + if k.Namespace == "" { + return k.Name + } + return k.Namespace + "/" + k.Name +} + +// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. +func requiresExactMatch(sel fields.Selector) (field, val string, required bool) { + reqs := sel.Requirements() + if len(reqs) != 1 { + return "", "", false + } + req := reqs[0] + if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { + return "", "", false + } + return req.Field, req.Value, true +} + +// FieldIndexName constructs the name of the index over the given field, +// for use with an indexer. +func FieldIndexName(field string) string { + return "field:" + field +} + +// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces. +const allNamespacesNamespace = "__all_namespaces" + +// KeyToNamespacedKey prefixes the given index key with a namespace +// for use in field selector indexes. +func KeyToNamespacedKey(ns string, baseKey string) string { + if ns != "" { + return ns + "/" + baseKey + } + return allNamespacesNamespace + "/" + baseKey +} diff --git a/pkg/cache/internal/deleg_map.go b/pkg/cache/internal/deleg_map.go new file mode 100644 index 0000000000..27f46e3278 --- /dev/null +++ b/pkg/cache/internal/deleg_map.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. 
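// A brief sketch of the delegation performed by Get below (illustrative; m, ctx
// and gvk are assumed):
//
//	_, entry, err := m.Get(ctx, gvk, &unstructured.Unstructured{})   // served by the unstructured map
//	_, entry, err = m.Get(ctx, gvk, &metav1.PartialObjectMetadata{}) // served by the metadata map
//	_, entry, err = m.Get(ctx, gvk, &corev1.Pod{})                   // served by the structured map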
+type InformersMap struct { + // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps + // TODO(directxman12): genericize this over different projections now that we have 3 different maps + + structured *specificInformersMap + unstructured *specificInformersMap + metadata *specificInformersMap + + // Scheme maps runtime.Objects to GroupVersionKinds + Scheme *runtime.Scheme +} + +// NewInformersMap creates a new InformersMap that can create informers for +// both structured and unstructured objects. +func NewInformersMap(config *rest.Config, + scheme *runtime.Scheme, + mapper meta.RESTMapper, + resync time.Duration, + namespace string, + selectors SelectorsByGVK, + disableDeepCopy DisableDeepCopyByGVK, + transformers TransformFuncByObject, +) *InformersMap { + return &InformersMap{ + structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + + Scheme: scheme, + } +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +func (m *InformersMap) Start(ctx context.Context) error { + go m.structured.Start(ctx) + go m.unstructured.Start(ctx) + go m.metadata.Start(ctx) + <-ctx.Done() + return nil +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { + syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) + + if !m.structured.waitForStarted(ctx) { + return false + } + if !m.unstructured.waitForStarted(ctx) { + return false + } + if !m.metadata.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) +} + +// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns +// the Informer from the map. +func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { + switch obj.(type) { + case *unstructured.Unstructured: + return m.unstructured.Get(ctx, gvk, obj) + case *unstructured.UnstructuredList: + return m.unstructured.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadata: + return m.metadata.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadataList: + return m.metadata.Get(ctx, gvk, obj) + default: + return m.structured.Get(ctx, gvk, obj) + } +} + +// newStructuredInformersMap creates a new InformersMap for structured objects. +func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch) +} + +// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. 
+func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch) +} + +// newMetadataInformersMap creates a new InformersMap for metadata-only objects. +func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch) +} diff --git a/pkg/cache/internal/disabledeepcopy.go b/pkg/cache/internal/disabledeepcopy.go new file mode 100644 index 0000000000..54bd7eec93 --- /dev/null +++ b/pkg/cache/internal/disabledeepcopy.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// GroupVersionKindAll is the argument to represent all GroupVersionKind types. +var GroupVersionKindAll = schema.GroupVersionKind{} + +// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache. +type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool + +// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy. +func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { + if d, ok := disableByGVK[gvk]; ok { + return d + } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { + return d + } + return false +} diff --git a/pkg/cache/internal/informers_map.go b/pkg/cache/internal/informers_map.go new file mode 100644 index 0000000000..1524d2316f --- /dev/null +++ b/pkg/cache/internal/informers_map.go @@ -0,0 +1,480 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// clientListWatcherFunc knows how to create a ListWatcher. +type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) + +// newSpecificInformersMap returns a new specificInformersMap (like +// the generical InformersMap, except that it doesn't implement WaitForCacheSync). +func newSpecificInformersMap(config *rest.Config, + scheme *runtime.Scheme, + mapper meta.RESTMapper, + resync time.Duration, + namespace string, + selectors SelectorsByGVK, + disableDeepCopy DisableDeepCopyByGVK, + transformers TransformFuncByObject, + createListWatcher createListWatcherFunc, +) *specificInformersMap { + ip := &specificInformersMap{ + config: config, + Scheme: scheme, + mapper: mapper, + informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), + codecs: serializer.NewCodecFactory(scheme), + paramCodec: runtime.NewParameterCodec(scheme), + resync: resync, + startWait: make(chan struct{}), + createListWatcher: createListWatcher, + namespace: namespace, + selectors: selectors.forGVK, + disableDeepCopy: disableDeepCopy, + transformers: transformers, + } + return ip +} + +// MapEntry contains the cached data for an Informer. +type MapEntry struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type specificInformersMap struct { + // Scheme maps runtime.Objects to GroupVersionKinds + Scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // informersByGVK is the cache of informers keyed by groupVersionKind + informersByGVK map[schema.GroupVersionKind]*MapEntry + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // stop is the stop channel to stop informers + stop <-chan struct{} + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // start is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. + startWait chan struct{} + + // createClient knows how to create a client and a list object, + // and allows for abstracting over the particulars of structured vs + // unstructured objects. 
+ createListWatcher createListWatcherFunc + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + // selectors are the label or field selectors that will be added to the + // ListWatch ListOptions. + selectors func(gvk schema.GroupVersionKind) Selector + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + disableDeepCopy DisableDeepCopyByGVK + + // transform funcs are applied to objects before they are committed to the cache + transformers TransformFuncByObject +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. +func (ip *specificInformersMap) Start(ctx context.Context) { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the stop channel so it can be passed to informers that are added later + ip.stop = ctx.Done() + + // Start each informer + for _, informer := range ip.informersByGVK { + go informer.Informer.Run(ctx.Done()) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() +} + +func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. +func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) + for _, informer := range ip.informersByGVK { + syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) + } + return syncedFuncs +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { + // Return the informer if it is found + i, started, ok := func() (*MapEntry, bool, bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByGVK[gvk] + return i, ip.started, ok + }() + + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. + if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByGVK[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ var lw *cache.ListWatch + lw, err := ip.createListWatcher(gvk, ip) + if err != nil { + return nil, false, err + } + ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { + return nil, false, err + } + + rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + i := &MapEntry{ + Informer: ni, + Reader: CacheReader{ + indexer: ni.GetIndexer(), + groupVersionKind: gvk, + scopeName: rm.Scope.Name(), + disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), + }, + } + ip.informersByGVK[gvk] = i + + // Start the Informer if need by + // TODO(seans): write thorough tests and document what happens here - can you add indexers? + // can you add eventhandlers? + if ip.started { + go i.Informer.Run(ip.stop) + } + return i, ip.started, nil +} + +// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. +func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.Scheme.New(listGVK) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. + ctx := context.TODO() + // Create a new ListWatch for the obj + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + res := listObj.DeepCopyObject() + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) + return res, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) + }, + }, nil +} + +func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. 
+ cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. + ctx := context.TODO() + // Create a new ListWatch for the obj + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) + } + return dynamicClient.Resource(mapping.Resource).List(ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) + } + return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) + }, + }, nil +} + +func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Always clear the negotiated serializer and use the one + // set from the metadata client. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // grab the metadata client + client, err := metadata.NewForConfig(cfg) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. 
+ ctx := context.TODO() + + // create the relevant listwatch + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + + var ( + list *metav1.PartialObjectMetadataList + err error + ) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) + } else { + list, err = client.Resource(mapping.Resource).List(ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + + var ( + watcher watch.Interface + err error + ) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) + } else { + watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) + } + if watcher != nil { + watcher = newGVKFixupWatcher(gvk, watcher) + } + return watcher, err + }, + }, nil +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// resyncPeriod returns a function which generates a duration each time it is +// invoked; this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. +func resyncPeriod(resync time.Duration) func() time.Duration { + return func() time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec + return time.Duration(float64(resync.Nanoseconds()) * factor) + } +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. 
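// A small sketch of the intended behaviour (illustrative only):
//
//	sel := Selector{Field: fields.OneTermEqualSelector("metadata.namespace", "team-a")}
//	restrictNamespaceBySelector("", sel)     // "team-a": the field selector narrows the watch
//	restrictNamespaceBySelector("prod", sel) // "prod": an explicit namespace option wins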
+func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/pkg/cache/internal/informers_map_test.go b/pkg/cache/internal/informers_map_test.go new file mode 100644 index 0000000000..32a26fff4e --- /dev/null +++ b/pkg/cache/internal/informers_map_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Test that gvkFixupWatcher behaves like watch.FakeWatcher +// and that it overrides the GVK. +// These tests are adapted from the watch.FakeWatcher tests in: +// https://github.com/kubernetes/kubernetes/blob/adbda068c1808fcc8a64a94269e0766b5c46ec41/staging/src/k8s.io/apimachinery/pkg/watch/watch_test.go#L33-L78 +var _ = Describe("gvkFixupWatcher", func() { + It("behaves like watch.FakeWatcher", func() { + newTestType := func(name string) runtime.Object { + return &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + } + + f := watch.NewFake() + // This is the GVK which we expect the wrapper to set on all the events + expectedGVK := schema.GroupVersionKind{ + Group: "testgroup", + Version: "v1test2", + Kind: "TestKind", + } + gvkfw := newGVKFixupWatcher(expectedGVK, f) + + table := []struct { + t watch.EventType + s runtime.Object + }{ + {watch.Added, newTestType("foo")}, + {watch.Modified, newTestType("qux")}, + {watch.Modified, newTestType("bar")}, + {watch.Deleted, newTestType("bar")}, + {watch.Error, newTestType("error: blah")}, + } + + consumer := func(w watch.Interface) { + for _, expect := range table { + By(fmt.Sprintf("Fixing up watch.EventType: %v and passing it on", expect.t)) + got, ok := <-w.ResultChan() + Expect(ok).To(BeTrue(), "closed early") + Expect(expect.t).To(Equal(got.Type), "unexpected Event.Type or out-of-order Event") + Expect(got.Object).To(BeAssignableToTypeOf(&metav1.PartialObjectMetadata{}), "unexpected Event.Object type") + a := got.Object.(*metav1.PartialObjectMetadata) + Expect(got.Object.GetObjectKind().GroupVersionKind()).To(Equal(expectedGVK), "GVK was not fixed up") + expected := expect.s.DeepCopyObject() + expected.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + actual := a.DeepCopyObject() + actual.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + Expect(actual).To(Equal(expected), "unexpected change to the Object") + } + Eventually(w.ResultChan()).Should(BeClosed()) + } + + sender := func() { + 
f.Add(newTestType("foo")) + f.Action(watch.Modified, newTestType("qux")) + f.Modify(newTestType("bar")) + f.Delete(newTestType("bar")) + f.Error(newTestType("error: blah")) + f.Stop() + } + + go sender() + consumer(gvkfw) + }) +}) diff --git a/pkg/cache/internal/internal_suite_test.go b/pkg/cache/internal/internal_suite_test.go new file mode 100644 index 0000000000..4e7c2b2de5 --- /dev/null +++ b/pkg/cache/internal/internal_suite_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Cache Internal Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/cache/internal/selector.go b/pkg/cache/internal/selector.go new file mode 100644 index 0000000000..4eff32fb35 --- /dev/null +++ b/pkg/cache/internal/selector.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SelectorsByGVK associate a GroupVersionKind to a field/label selector. +type SelectorsByGVK map[schema.GroupVersionKind]Selector + +func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { + if specific, found := s[gvk]; found { + return specific + } + if defaultSelector, found := s[schema.GroupVersionKind{}]; found { + return defaultSelector + } + + return Selector{} +} + +// Selector specify the label/field selector to fill in ListOptions. +type Selector struct { + Label labels.Selector + Field fields.Selector +} + +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed. 
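+// For example (illustrative only): a label selector built with
+// labels.SelectorFromSet fills in just the LabelSelector string and leaves
+// FieldSelector empty:
+//
+//	opts := metav1.ListOptions{}
+//	sel := Selector{Label: labels.SelectorFromSet(labels.Set{"app": "frontend"})}
+//	sel.ApplyToList(&opts) // opts.LabelSelector == "app=frontend", opts.FieldSelector == ""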
+func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { + if s.Label != nil { + listOpts.LabelSelector = s.Label.String() + } + if s.Field != nil { + listOpts.FieldSelector = s.Field.String() + } +} diff --git a/pkg/cache/internal/transformers.go b/pkg/cache/internal/transformers.go new file mode 100644 index 0000000000..8cf642c4bd --- /dev/null +++ b/pkg/cache/internal/transformers.go @@ -0,0 +1,50 @@ +package internal + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// TransformFuncByObject provides access to the correct transform function for +// any given GVK. +type TransformFuncByObject interface { + Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error + Get(schema.GroupVersionKind) cache.TransformFunc + SetDefault(transformer cache.TransformFunc) +} + +type transformFuncByGVK struct { + defaultTransform cache.TransformFunc + transformers map[schema.GroupVersionKind]cache.TransformFunc +} + +// NewTransformFuncByObject creates a new TransformFuncByObject instance. +func NewTransformFuncByObject() TransformFuncByObject { + return &transformFuncByGVK{ + transformers: make(map[schema.GroupVersionKind]cache.TransformFunc), + defaultTransform: nil, + } +} + +func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) { + t.defaultTransform = transformer +} + +func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return err + } + + t.transformers[gvk] = transformer + return nil +} + +func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc { + if val, ok := t.transformers[gvk]; ok { + return val + } + return t.defaultTransform +} diff --git a/pkg/cache/multi_namespace_cache.go b/pkg/cache/multi_namespace_cache.go new file mode 100644 index 0000000000..64514c0c55 --- /dev/null +++ b/pkg/cache/multi_namespace_cache.go @@ -0,0 +1,331 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) + +// a new global namespaced cache to handle cluster scoped resources. +const globalCache = "_cluster-scope" + +// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. +// This will scope the cache to a list of namespaces. 
Listing for all namespaces
+// will list for all the namespaces that this cache knows about. By default this will create
+// a global cache for cluster-scoped resources. Note that this is not intended
+// to be used for excluding namespaces; that is better done via a Predicate. Also note that
+// you may face performance issues when using this with a high number of namespaces.
+func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
+	return func(config *rest.Config, opts Options) (Cache, error) {
+		opts, err := defaultOpts(config, opts)
+		if err != nil {
+			return nil, err
+		}
+
+		caches := map[string]Cache{}
+
+		// create a cache for cluster-scoped resources
+		gCache, err := New(config, opts)
+		if err != nil {
+			return nil, fmt.Errorf("error creating global cache: %w", err)
+		}
+
+		for _, ns := range namespaces {
+			opts.Namespace = ns
+			c, err := New(config, opts)
+			if err != nil {
+				return nil, err
+			}
+			caches[ns] = c
+		}
+		return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil
+	}
+}
+
+// multiNamespaceCache knows how to handle multiple namespaced caches.
+// Use this feature when scoping permissions for your
+// operator to a list of namespaces instead of watching every namespace
+// in the cluster.
+type multiNamespaceCache struct {
+	namespaceToCache map[string]Cache
+	Scheme           *runtime.Scheme
+	RESTMapper       apimeta.RESTMapper
+	clusterCache     Cache
+}
+
+var _ Cache = &multiNamespaceCache{}
+
+// Methods for multiNamespaceCache to conform to the Informers interface.
+func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) {
+	informers := map[string]Informer{}
+
+	// If the object is cluster-scoped, get the informer from clusterCache;
+	// if not, use the namespaced caches.
+	isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
+	if err != nil {
+		return nil, err
+	}
+	if !isNamespaced {
+		clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj)
+		if err != nil {
+			return nil, err
+		}
+		informers[globalCache] = clusterCacheInf
+
+		return &multiNamespaceInformer{namespaceToInformer: informers}, nil
+	}
+
+	for ns, cache := range c.namespaceToCache {
+		informer, err := cache.GetInformer(ctx, obj)
+		if err != nil {
+			return nil, err
+		}
+		informers[ns] = informer
+	}
+
+	return &multiNamespaceInformer{namespaceToInformer: informers}, nil
+}
+
+func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) {
+	informers := map[string]Informer{}
+
+	// If the object is cluster-scoped, get the informer from clusterCache;
+	// if not, use the namespaced caches.
+ isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) Start(ctx context.Context) error { + // start global cache + go func() { + err := c.clusterCache.Start(ctx) + if err != nil { + log.Error(err, "cluster scoped cache failed to start") + } + }() + + // start namespaced caches + for ns, cache := range c.namespaceToCache { + go func(ns string, cache Cache) { + err := cache.Start(ctx) + if err != nil { + log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + } + }(ns, cache) + } + + <-ctx.Done() + return nil +} + +func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { + synced := true + for _, cache := range c.namespaceToCache { + if s := cache.WaitForCacheSync(ctx); !s { + synced = s + } + } + + // check if cluster scoped cache has synced + if !c.clusterCache.WaitForCacheSync(ctx) { + synced = false + } + return synced +} + +func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil //nolint:nilerr + } + + if !isNamespaced { + return c.clusterCache.IndexField(ctx, obj, field, extractValue) + } + + for _, cache := range c.namespaceToCache { + if err := cache.IndexField(ctx, obj, field, extractValue); err != nil { + return err + } + } + return nil +} + +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look into the global cache to fetch the object + return c.clusterCache.Get(ctx, key, obj) + } + + cache, ok := c.namespaceToCache[key.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) + } + return cache.Get(ctx, key, obj) +} + +// List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. +func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look at the global cache to get the objects with the specified GVK + return c.clusterCache.List(ctx, list, opts...) + } + + if listOpts.Namespace != corev1.NamespaceAll { + cache, ok := c.namespaceToCache[listOpts.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + } + return cache.List(ctx, list, opts...) 
+ } + + listAccessor, err := apimeta.ListAccessor(list) + if err != nil { + return err + } + + allItems, err := apimeta.ExtractList(list) + if err != nil { + return err + } + + limitSet := listOpts.Limit > 0 + + var resourceVersion string + for _, cache := range c.namespaceToCache { + listObj := list.DeepCopyObject().(client.ObjectList) + err = cache.List(ctx, listObj, &listOpts) + if err != nil { + return err + } + items, err := apimeta.ExtractList(listObj) + if err != nil { + return err + } + accessor, err := apimeta.ListAccessor(listObj) + if err != nil { + return fmt.Errorf("object: %T must be a list type", list) + } + allItems = append(allItems, items...) + // The last list call should have the most correct resource version. + resourceVersion = accessor.GetResourceVersion() + if limitSet { + // decrement Limit by the number of items + // fetched from the current namespace. + listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of + // items read has reached this set limit, + // then stop reading. + if listOpts.Limit == 0 { + break + } + } + } + listAccessor.SetResourceVersion(resourceVersion) + + return apimeta.SetList(list, allItems) +} + +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. +type multiNamespaceInformer struct { + namespaceToInformer map[string]Informer +} + +var _ Informer = &multiNamespaceInformer{} + +// AddEventHandler adds the handler to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandler(handler) + } +} + +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) + } +} + +// AddIndexers adds the indexer for each namespaced informer. +func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { + for _, informer := range i.namespaceToInformer { + err := informer.AddIndexers(indexers) + if err != nil { + return err + } + } + return nil +} + +// HasSynced checks if each namespaced informer has synced. +func (i *multiNamespaceInformer) HasSynced() bool { + for _, informer := range i.namespaceToInformer { + if ok := informer.HasSynced(); !ok { + return ok + } + } + return true +} diff --git a/pkg/certwatcher/certwatcher.go b/pkg/certwatcher/certwatcher.go new file mode 100644 index 0000000000..1030013db3 --- /dev/null +++ b/pkg/certwatcher/certwatcher.go @@ -0,0 +1,166 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certwatcher + +import ( + "context" + "crypto/tls" + "sync" + + "github.com/fsnotify/fsnotify" + "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("certwatcher") + +// CertWatcher watches certificate and key files for changes. When either file +// changes, it reads and parses both and calls an optional callback with the new +// certificate. +type CertWatcher struct { + sync.RWMutex + + currentCert *tls.Certificate + watcher *fsnotify.Watcher + + certPath string + keyPath string +} + +// New returns a new CertWatcher watching the given certificate and key. +func New(certPath, keyPath string) (*CertWatcher, error) { + var err error + + cw := &CertWatcher{ + certPath: certPath, + keyPath: keyPath, + } + + // Initial read of certificate and key. + if err := cw.ReadCertificate(); err != nil { + return nil, err + } + + cw.watcher, err = fsnotify.NewWatcher() + if err != nil { + return nil, err + } + + return cw, nil +} + +// GetCertificate fetches the currently loaded certificate, which may be nil. +func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cw.RLock() + defer cw.RUnlock() + return cw.currentCert, nil +} + +// Start starts the watch on the certificate and key files. +func (cw *CertWatcher) Start(ctx context.Context) error { + files := []string{cw.certPath, cw.keyPath} + + for _, f := range files { + if err := cw.watcher.Add(f); err != nil { + return err + } + } + + go cw.Watch() + + log.Info("Starting certificate watcher") + + // Block until the context is done. + <-ctx.Done() + + return cw.watcher.Close() +} + +// Watch reads events from the watcher's channel and reacts to changes. +func (cw *CertWatcher) Watch() { + for { + select { + case event, ok := <-cw.watcher.Events: + // Channel is closed. + if !ok { + return + } + + cw.handleEvent(event) + + case err, ok := <-cw.watcher.Errors: + // Channel is closed. + if !ok { + return + } + + log.Error(err, "certificate watch error") + } + } +} + +// ReadCertificate reads the certificate and key files from disk, parses them, +// and updates the current certificate on the watcher. If a callback is set, it +// is invoked with the new certificate. +func (cw *CertWatcher) ReadCertificate() error { + metrics.ReadCertificateTotal.Inc() + cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + + cw.Lock() + cw.currentCert = &cert + cw.Unlock() + + log.Info("Updated current TLS certificate") + + return nil +} + +func (cw *CertWatcher) handleEvent(event fsnotify.Event) { + // Only care about events which may modify the contents of the file. + if !(isWrite(event) || isRemove(event) || isCreate(event)) { + return + } + + log.V(1).Info("certificate event", "event", event) + + // If the file was removed, re-add the watch. 
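+	// (fsnotify drops its watch once the watched file is deleted, so the path
+	// has to be added back or no further events would arrive after the
+	// certificate file is recreated.)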
+ if isRemove(event) { + if err := cw.watcher.Add(event.Name); err != nil { + log.Error(err, "error re-watching file") + } + } + + if err := cw.ReadCertificate(); err != nil { + log.Error(err, "error re-reading certificate") + } +} + +func isWrite(event fsnotify.Event) bool { + return event.Op&fsnotify.Write == fsnotify.Write +} + +func isCreate(event fsnotify.Event) bool { + return event.Op&fsnotify.Create == fsnotify.Create +} + +func isRemove(event fsnotify.Event) bool { + return event.Op&fsnotify.Remove == fsnotify.Remove +} diff --git a/pkg/certwatcher/certwatcher_suite_test.go b/pkg/certwatcher/certwatcher_suite_test.go new file mode 100644 index 0000000000..e1e9861ea5 --- /dev/null +++ b/pkg/certwatcher/certwatcher_suite_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher_test + +import ( + "os" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + certPath = "testdata/tls.crt" + keyPath = "testdata/tls.key" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "CertWatcher Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}, 60) + +var _ = AfterSuite(func() { + for _, file := range []string{certPath, keyPath} { + _ = os.Remove(file) + } +}, 60) diff --git a/pkg/certwatcher/certwatcher_test.go b/pkg/certwatcher/certwatcher_test.go new file mode 100644 index 0000000000..8ca27b27b1 --- /dev/null +++ b/pkg/certwatcher/certwatcher_test.go @@ -0,0 +1,243 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher_test + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus/testutil" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" +) + +var _ = Describe("CertWatcher", func() { + var _ = Describe("certwatcher New", func() { + It("should errors without cert/key", func() { + _, err := certwatcher.New("", "") + Expect(err).ToNot(BeNil()) + }) + }) + + var _ = Describe("certwatcher Start", func() { + var ( + ctx context.Context + ctxCancel context.CancelFunc + watcher *certwatcher.CertWatcher + ) + + BeforeEach(func() { + ctx, ctxCancel = context.WithCancel(context.Background()) + + err := writeCerts(certPath, keyPath, "127.0.0.1") + Expect(err).To(BeNil()) + + Eventually(func() error { + for _, file := range []string{certPath, keyPath} { + _, err := os.ReadFile(file) + if err != nil { + return err + } + continue + } + + return nil + }).Should(Succeed()) + + watcher, err = certwatcher.New(certPath, keyPath) + Expect(err).To(BeNil()) + }) + + startWatcher := func() (done <-chan struct{}) { + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + Expect(watcher.Start(ctx)).To(Succeed()) + }() + // wait till we read first cert + Eventually(func() error { + err := watcher.ReadCertificate() + return err + }).Should(Succeed()) + return doneCh + } + + It("should read the initial cert/key", func() { + doneCh := startWatcher() + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should reload currentCert when changed", func() { + doneCh := startWatcher() + + firstcert, _ := watcher.GetCertificate(nil) + + err := writeCerts(certPath, keyPath, "192.168.0.1") + Expect(err).To(BeNil()) + + Eventually(func() bool { + secondcert, _ := watcher.GetCertificate(nil) + first := firstcert.PrivateKey.(*rsa.PrivateKey) + return first.Equal(secondcert.PrivateKey) + }).ShouldNot(BeTrue()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + Context("prometheus metric read_certificate_total", func() { + var readCertificateTotalBefore float64 + var readCertificateErrorsBefore float64 + + BeforeEach(func() { + readCertificateTotalBefore = testutil.ToFloat64(metrics.ReadCertificateTotal) + readCertificateErrorsBefore = testutil.ToFloat64(metrics.ReadCertificateErrors) + }) + + It("should get updated on successful certificate read", func() { + doneCh := startWatcher() + + Eventually(func() error { + readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) + if readCertificateTotalAfter != readCertificateTotalBefore+1.0 { + return fmt.Errorf("metric read certificate total expected: %v and got: %v", readCertificateTotalBefore+1.0, readCertificateTotalAfter) + } + return nil + }, "4s").Should(Succeed()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should get updated on read certificate errors", func() { + doneCh := startWatcher() + + Eventually(func() error { + readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) + if readCertificateTotalAfter != readCertificateTotalBefore+1.0 { + return fmt.Errorf("metric read certificate total expected: %v and got: %v", readCertificateTotalBefore+1.0, readCertificateTotalAfter) + } + readCertificateTotalBefore = readCertificateTotalAfter + return nil + }, "4s").Should(Succeed()) + + Expect(os.Remove(keyPath)).To(BeNil()) + + Eventually(func() error { + readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) + if readCertificateTotalAfter != 
readCertificateTotalBefore+1.0 { + return fmt.Errorf("metric read certificate total expected: %v and got: %v", readCertificateTotalBefore+1.0, readCertificateTotalAfter) + } + return nil + }, "4s").Should(Succeed()) + Eventually(func() error { + readCertificateErrorsAfter := testutil.ToFloat64(metrics.ReadCertificateErrors) + if readCertificateErrorsAfter != readCertificateErrorsBefore+1.0 { + return fmt.Errorf("metric read certificate errors expected: %v and got: %v", readCertificateErrorsBefore+1.0, readCertificateErrorsAfter) + } + return nil + }, "4s").Should(Succeed()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + }) + }) +}) + +func writeCerts(certPath, keyPath, ip string) error { + var priv interface{} + var err error + priv, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return err + } + + keyUsage := x509.KeyUsageDigitalSignature + if _, isRSA := priv.(*rsa.PrivateKey); isRSA { + keyUsage |= x509.KeyUsageKeyEncipherment + } + + notBefore := time.Now() + notAfter := notBefore.Add(1 * time.Hour) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Kubernetes"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: keyUsage, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + template.IPAddresses = append(template.IPAddresses, net.ParseIP(ip)) + + privkey := priv.(*rsa.PrivateKey) + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privkey.PublicKey, priv) + if err != nil { + return err + } + + certOut, err := os.Create(certPath) + if err != nil { + return err + } + if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return err + } + if err := certOut.Close(); err != nil { + return err + } + + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return err + } + if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { + return err + } + return keyOut.Close() +} diff --git a/pkg/certwatcher/doc.go b/pkg/certwatcher/doc.go new file mode 100644 index 0000000000..40c2fc0bfb --- /dev/null +++ b/pkg/certwatcher/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package certwatcher is a helper for reloading Certificates from disk to be used +with tls servers. It provides a helper func `GetCertificate` which can be +called from `tls.Config` and passed into your tls.Listener. For a detailed +example server view pkg/webhook/server.go. 
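+
+A minimal hookup sketch (paths and variable names are illustrative only):
+
+	watcher, err := certwatcher.New("/etc/certs/tls.crt", "/etc/certs/tls.key")
+	if err != nil {
+		// handle error
+	}
+	cfg := &tls.Config{GetCertificate: watcher.GetCertificate}
+	// watcher.Start(ctx) must be running for certificate reloads to take effect.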
+*/
+package certwatcher
diff --git a/pkg/certwatcher/example_test.go b/pkg/certwatcher/example_test.go
new file mode 100644
index 0000000000..6e9bcdfb95
--- /dev/null
+++ b/pkg/certwatcher/example_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certwatcher_test
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http"
+	"time"
+
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
+)
+
+type sampleServer struct {
+}
+
+func Example() {
+	// Set up the context
+	ctx := ctrl.SetupSignalHandler()
+
+	// Initialize a new cert watcher with cert/key pair
+	watcher, err := certwatcher.New("ssl/tls.crt", "ssl/tls.key")
+	if err != nil {
+		panic(err)
+	}
+
+	// Start goroutine with certwatcher running fsnotify against supplied certdir
+	go func() {
+		if err := watcher.Start(ctx); err != nil {
+			panic(err)
+		}
+	}()
+
+	// Set up the TLS listener using GetCertificate, so new connections pick up
+	// the certificate currently held by the watcher.
+	listener, err := tls.Listen("tcp", "localhost:9443", &tls.Config{
+		GetCertificate: watcher.GetCertificate,
+		MinVersion:     tls.VersionTLS12,
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	// Initialize your TLS server
+	srv := &http.Server{
+		Handler:           &sampleServer{},
+		ReadHeaderTimeout: 5 * time.Second,
+	}
+
+	// Start goroutine for handling server shutdown.
+	go func() {
+		<-ctx.Done()
+		if err := srv.Shutdown(context.Background()); err != nil {
+			panic(err)
+		}
+	}()
+
+	// Serve on the TLS listener; ErrServerClosed is expected after a graceful shutdown.
+	if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
+		panic(err)
+	}
+}
+
+func (s *sampleServer) ServeHTTP(http.ResponseWriter, *http.Request) {
+}
diff --git a/pkg/certwatcher/metrics/metrics.go b/pkg/certwatcher/metrics/metrics.go
new file mode 100644
index 0000000000..05869eff03
--- /dev/null
+++ b/pkg/certwatcher/metrics/metrics.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+var (
+	// ReadCertificateTotal is a prometheus counter metric which holds the total
+	// number of certificate reads.
+	ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "certwatcher_read_certificate_total",
+		Help: "Total number of certificate reads",
+	})
+
+	// ReadCertificateErrors is a prometheus counter metric which holds the total
+	// number of certificate read errors.
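+	// Like ReadCertificateTotal, it is registered with the shared
+	// controller-runtime metrics Registry in init() below and is exported
+	// wherever that registry is served (typically the manager's /metrics endpoint).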
+ ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_errors_total", + Help: "Total number of certificate read errors", + }) +) + +func init() { + metrics.Registry.MustRegister( + ReadCertificateTotal, + ReadCertificateErrors, + ) +} diff --git a/pkg/certwatcher/testdata/.gitkeep b/pkg/certwatcher/testdata/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/client/apiutil/apimachinery.go b/pkg/client/apiutil/apimachinery.go new file mode 100644 index 0000000000..c92b0eaaec --- /dev/null +++ b/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. +package apiutil + +import ( + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + +// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery +// information fetched by a new client with the given config. +func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. +func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. 
It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) +} + +// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory +// in order to avoid clearing the GVK from the decoded object. +// +// See https://github.com/kubernetes/kubernetes/issues/80609. +type serializerWithDecodedGVK struct { + serializer.WithoutConversionCodecFactory +} + +// DecoderToVersion returns an decoder that does not do conversion. +func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return serializer +} + +// createRestConfig copies the base config and updates needed fields for a new rest config. +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + + if isUnstructured { + // If the object is unstructured, we need to preserve the GVK information. 
+ // Use our own custom serializer. + cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } else { + cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } + + return cfg +} + +type serializerWithTargetZeroingDecode struct { + runtime.NegotiatedSerializer +} + +func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder { + return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)} +} + +type targetZeroingDecoder struct { + upstream runtime.Decoder +} + +func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + zero(into) + return t.upstream.Decode(data, defaults, into) +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/pkg/client/apiutil/apiutil_suite_test.go b/pkg/client/apiutil/apiutil_suite_test.go new file mode 100644 index 0000000000..f617195724 --- /dev/null +++ b/pkg/client/apiutil/apiutil_suite_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "API Utilities Test Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var cfg *rest.Config + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + // for things that technically need a rest.Config for defaulting, but don't actually use them + cfg = &rest.Config{} +}, 60) diff --git a/pkg/client/apiutil/dynamicrestmapper.go b/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 0000000000..8b7c1c4b68 --- /dev/null +++ b/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,290 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiutil + +import ( + "sync" + "sync/atomic" + + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *rate.Limiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + inited uint32 + initMtx sync.Mutex +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. +func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = lim + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. +func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. +func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. 
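+// The fast path is a single atomic load, so the mapper methods below can call
+// it on every lookup without meaningful overhead once initialization is done.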
+func (drm *dynamicRESTMapper) init() (err error) {
+	// skip init if drm is not lazy or has already initialized
+	if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 {
+		return nil
+	}
+
+	drm.initMtx.Lock()
+	defer drm.initMtx.Unlock()
+	if drm.inited == 0 {
+		if err = drm.setStaticMapper(); err == nil {
+			atomic.StoreUint32(&drm.inited, 1)
+		}
+	}
+	return err
+}
+
+// checkAndReload attempts to call the given callback, which is assumed to be dependent
+// on the data in the restmapper.
+//
+// If the callback returns an error matching meta.IsNoMatchError, it will attempt to reload
+// the RESTMapper's data and re-call the callback once that's occurred.
+// If the callback returns any other error, the function will return immediately regardless.
+//
+// It will take care of ensuring that reloads are rate-limited and that extraneous calls
+// aren't made. If a reload would exceed the limiter's rate, it returns the error returned by
+// the callback.
+// It's thread-safe, and worries about thread-safety for the callback (so the callback does
+// not need to attempt to lock the restmapper).
+func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error {
+	// first, check the common path -- data is fresh enough
+	// (use an IIFE for the lock's defer)
+	err := func() error {
+		drm.mu.RLock()
+		defer drm.mu.RUnlock()
+
+		return checkNeedsReload()
+	}()
+
+	needsReload := meta.IsNoMatchError(err)
+	if !needsReload {
+		return err
+	}
+
+	// if the data wasn't fresh, we'll need to try and update it, so grab the lock...
+	drm.mu.Lock()
+	defer drm.mu.Unlock()
+
+	// ... and double-check that we didn't reload in the meantime
+	err = checkNeedsReload()
+	needsReload = meta.IsNoMatchError(err)
+	if !needsReload {
+		return err
+	}
+
+	// we're still stale, so grab a rate-limit token if we can...
+	if !drm.limiter.Allow() {
+		// return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter)
+		// so that clients can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError
+		return err
+	}
+
+	// ...reload...
+	if err := drm.setStaticMapper(); err != nil {
+		return err
+	}
+
+	// ...and return the results of the closure regardless
+	return checkNeedsReload()
+}
+
+// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors.
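+
+// Illustrative construction (a sketch; cfg stands for any valid *rest.Config):
+//
+//	limiter := rate.NewLimiter(rate.Limit(2), 2)
+//	mapper, err := NewDynamicRESTMapper(cfg, WithLimiter(limiter), WithLazyDiscovery)
+//	if err != nil {
+//		// handle error
+//	}
+//	// mapper satisfies meta.RESTMapper and refreshes its data on cache misses,
+//	// subject to the configured rate limit.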
+ +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) + return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} diff --git a/pkg/client/apiutil/dynamicrestmapper_test.go b/pkg/client/apiutil/dynamicrestmapper_test.go new file mode 100644 index 0000000000..6b88a3aa5f --- /dev/null +++ b/pkg/client/apiutil/dynamicrestmapper_test.go @@ -0,0 +1,304 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + targetGVK = schema.GroupVersionKind{Group: "test.kubebuilder.io", Version: "v1beta1", Kind: "SomeCR"} + targetGVR = targetGVK.GroupVersion().WithResource("somecrs") + targetMapping = meta.RESTMapping{Resource: targetGVR, GroupVersionKind: targetGVK, Scope: meta.RESTScopeNamespace} + + secondGVK = schema.GroupVersionKind{Group: "test.kubebuilder.io", Version: "v1beta1", Kind: "OtherCR"} + secondGVR = secondGVK.GroupVersion().WithResource("othercrs") + secondMapping = meta.RESTMapping{Resource: secondGVR, GroupVersionKind: secondGVK, Scope: meta.RESTScopeNamespace} +) + +var _ = Describe("Dynamic REST Mapper", func() { + var mapper meta.RESTMapper + var addToMapper func(*meta.DefaultRESTMapper) + var lim *rate.Limiter + + BeforeEach(func() { + var err error + addToMapper = func(baseMapper *meta.DefaultRESTMapper) { + baseMapper.Add(targetGVK, meta.RESTScopeNamespace) + } + + lim = rate.NewLimiter(rate.Limit(5), 5) + mapper, err = NewDynamicRESTMapper(cfg, WithLimiter(lim), WithCustomMapper(func() (meta.RESTMapper, error) { + baseMapper := meta.NewDefaultRESTMapper(nil) + addToMapper(baseMapper) + + return baseMapper, nil + })) + Expect(err).NotTo(HaveOccurred()) + }) + + var mapperTest = func(callWithTarget func() error, callWithOther func() error) { + It("should read from the cache when possible", func() { + By("reading successfully once when we expect to succeed") + Expect(callWithTarget()).To(Succeed()) + + By("causing requerying to fail, and trying again") + addToMapper = func(_ *meta.DefaultRESTMapper) { + Fail("shouldn't have re-queried") + } + Expect(callWithTarget()).To(Succeed()) + }) + + It("should reload if not present in the cache", func() { + By("reading target successfully once") + Expect(callWithTarget()).To(Succeed()) + + By("reading other not successfully") + count := 0 + addToMapper = func(baseMapper *meta.DefaultRESTMapper) { + count++ + baseMapper.Add(targetGVK, meta.RESTScopeNamespace) + } + Expect(callWithOther()).To(beNoMatchError()) + Expect(count).To(Equal(1), "should reload exactly once") + + By("reading both successfully now") + addToMapper = func(baseMapper *meta.DefaultRESTMapper) { + baseMapper.Add(targetGVK, meta.RESTScopeNamespace) + baseMapper.Add(secondGVK, meta.RESTScopeNamespace) + } + Expect(callWithOther()).To(Succeed()) + Expect(callWithTarget()).To(Succeed()) + }) + + It("should rate-limit then allow more at configured rate", func() { + By("setting a small limit") + *lim = *rate.NewLimiter(rate.Every(100*time.Millisecond), 1) + + By("forcing a reload after changing the mapper") + addToMapper = func(baseMapper *meta.DefaultRESTMapper) { + baseMapper.Add(secondGVK, meta.RESTScopeNamespace) + } + Expect(callWithOther()).To(Succeed()) + + By("calling another time to trigger rate limiting") + addToMapper = func(baseMapper *meta.DefaultRESTMapper) { + baseMapper.Add(targetGVK, meta.RESTScopeNamespace) + } + // if call consistently fails, we are sure, that it was rate-limited, + // otherwise it would have reloaded and succeeded + Consistently(callWithTarget, "90ms", "10ms").Should(beNoMatchError()) + + By("calling until no longer rate-limited") + // once call succeeds, we are sure, that it was no longer rate-limited, + // as it was allowed to reload and found matching kind/resource + Eventually(callWithTarget, "30ms", "10ms").Should(And(Succeed(), 
Not(beNoMatchError()))) + }) + + It("should avoid reloading twice if two requests for the same thing come in", func() { + count := 0 + // we use sleeps here to simulate two simulataneous requests by slowing things down + addToMapper = func(baseMapper *meta.DefaultRESTMapper) { + count++ + baseMapper.Add(secondGVK, meta.RESTScopeNamespace) + time.Sleep(100 * time.Millisecond) + } + + By("calling two long-running refreshes in parallel and expecting them to succeed") + done := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(callWithOther()).To(Succeed()) + close(done) + }() + + Expect(callWithOther()).To(Succeed()) + + // wait till the other goroutine completes to avoid races from a + // new test writing to mapper, and to make sure we read the right + // count + <-done + + By("ensuring that it was only refreshed once") + Expect(count).To(Equal(1)) + }) + + It("should lazily initialize if the lazy option is used", func() { + var err error + var failedOnce bool + mockErr := fmt.Errorf("mock failed once") + mapper, err = NewDynamicRESTMapper(cfg, WithLazyDiscovery, WithCustomMapper(func() (meta.RESTMapper, error) { + // Make newMapper fail once + if !failedOnce { + failedOnce = true + return nil, mockErr + } + baseMapper := meta.NewDefaultRESTMapper(nil) + addToMapper(baseMapper) + return baseMapper, nil + })) + Expect(err).NotTo(HaveOccurred()) + Expect(mapper.(*dynamicRESTMapper).staticMapper).To(BeNil()) + + Expect(callWithTarget()).To(MatchError(mockErr)) + Expect(callWithTarget()).To(Succeed()) + }) + } + + Describe("KindFor", func() { + mapperTest(func() error { + gvk, err := mapper.KindFor(targetGVR) + if err == nil { + Expect(gvk).To(Equal(targetGVK)) + } + return err + }, func() error { + gvk, err := mapper.KindFor(secondGVR) + if err == nil { + Expect(gvk).To(Equal(secondGVK)) + } + return err + }) + }) + + Describe("KindsFor", func() { + mapperTest(func() error { + gvk, err := mapper.KindsFor(targetGVR) + if err == nil { + Expect(gvk).To(Equal([]schema.GroupVersionKind{targetGVK})) + } + return err + }, func() error { + gvk, err := mapper.KindsFor(secondGVR) + if err == nil { + Expect(gvk).To(Equal([]schema.GroupVersionKind{secondGVK})) + } + return err + }) + }) + + Describe("ResourceFor", func() { + mapperTest(func() error { + gvk, err := mapper.ResourceFor(targetGVR) + if err == nil { + Expect(gvk).To(Equal(targetGVR)) + } + return err + }, func() error { + gvk, err := mapper.ResourceFor(secondGVR) + if err == nil { + Expect(gvk).To(Equal(secondGVR)) + } + return err + }) + }) + + Describe("ResourcesFor", func() { + mapperTest(func() error { + gvk, err := mapper.ResourcesFor(targetGVR) + if err == nil { + Expect(gvk).To(Equal([]schema.GroupVersionResource{targetGVR})) + } + return err + }, func() error { + gvk, err := mapper.ResourcesFor(secondGVR) + if err == nil { + Expect(gvk).To(Equal([]schema.GroupVersionResource{secondGVR})) + } + return err + }) + }) + + Describe("RESTMappingFor", func() { + mapperTest(func() error { + gvk, err := mapper.RESTMapping(targetGVK.GroupKind(), targetGVK.Version) + if err == nil { + Expect(gvk).To(Equal(&targetMapping)) + } + return err + }, func() error { + gvk, err := mapper.RESTMapping(secondGVK.GroupKind(), targetGVK.Version) + if err == nil { + Expect(gvk).To(Equal(&secondMapping)) + } + return err + }) + }) + + Describe("RESTMappingsFor", func() { + mapperTest(func() error { + gvk, err := mapper.RESTMappings(targetGVK.GroupKind(), targetGVK.Version) + if err == nil { + 
Expect(gvk).To(Equal([]*meta.RESTMapping{&targetMapping})) + } + return err + }, func() error { + gvk, err := mapper.RESTMappings(secondGVK.GroupKind(), targetGVK.Version) + if err == nil { + Expect(gvk).To(Equal([]*meta.RESTMapping{&secondMapping})) + } + return err + }) + }) + + Describe("ResourceSingularizer", func() { + mapperTest(func() error { + gvk, err := mapper.ResourceSingularizer(targetGVR.Resource) + if err == nil { + Expect(gvk).To(Equal(targetGVR.Resource[:len(targetGVR.Resource)-1])) + } + return err + }, func() error { + gvk, err := mapper.ResourceSingularizer(secondGVR.Resource) + if err == nil { + Expect(gvk).To(Equal(secondGVR.Resource[:len(secondGVR.Resource)-1])) + } + return err + }) + }) +}) + +func beNoMatchError() types.GomegaMatcher { + return noMatchErrorMatcher{} +} + +type noMatchErrorMatcher struct{} + +func (k noMatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + actualErr, actualOk := actual.(error) + if !actualOk { + return false, nil + } + + return meta.IsNoMatchError(actualErr), nil +} + +func (k noMatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be a NoMatchError") +} +func (k noMatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be a NoMatchError") +} diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 0000000000..730e0ba910 --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,327 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// WarningHandlerOptions are options for configuring a +// warning handler for the client which is responsible +// for surfacing API Server warnings. +type WarningHandlerOptions struct { + // SuppressWarnings decides if the warnings from the + // API server are suppressed or surfaced in the client. + SuppressWarnings bool + // AllowDuplicateLogs does not deduplicate the to-be + // logged surfaced warnings messages. See + // log.WarningHandlerOptions for considerations + // regarding deduplication + AllowDuplicateLogs bool +} + +// Options are creation options for a Client. 
+type Options struct { + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Opts is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + Opts WarningHandlerOptions +} + +// New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. +func New(config *rest.Config, options Options) (Client, error) { + return newClient(config, options) +} + +func newClient(config *rest.Config, options Options) (*client, error) { + if config == nil { + return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") + } + + if !options.Opts.SuppressWarnings { + // surface warnings + logger := log.Log.WithName("KubeAPIWarningLogger") + // Set a WarningHandler, the default WarningHandler + // is log.KubeAPIWarningLogger with deduplication enabled. + // See log.KubeAPIWarningLoggerOptions for considerations + // regarding deduplication. + config = rest.CopyConfig(config) + config.WarningHandler = log.NewKubeAPIWarningLogger( + logger, + log.KubeAPIWarningLoggerOptions{ + Deduplicate: !options.Opts.AllowDuplicateLogs, + }, + ) + } + + // Init a scheme if none provided + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + // Init a Mapper if none provided + if options.Mapper == nil { + var err error + options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + if err != nil { + return nil, err + } + } + + clientcache := &clientCache{ + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + + c := &client{ + typedClient: typedClient{ + cache: clientcache, + paramCodec: runtime.NewParameterCodec(options.Scheme), + }, + unstructuredClient: unstructuredClient{ + cache: clientcache, + paramCodec: noConversionParamCodec{}, + }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, + scheme: options.Scheme, + mapper: options.Mapper, + } + + return c, nil +} + +var _ Client = &client{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type client struct { + typedClient typedClient + unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. 
+func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) {
+	if gvk != schema.EmptyObjectKind.GroupVersionKind() {
+		if v, ok := obj.(schema.ObjectKind); ok {
+			v.SetGroupVersionKind(gvk)
+		}
+	}
+}
+
+// Scheme returns the scheme this client is using.
+func (c *client) Scheme() *runtime.Scheme {
+	return c.scheme
+}
+
+// RESTMapper returns the REST mapper this client is using.
+func (c *client) RESTMapper() meta.RESTMapper {
+	return c.mapper
+}
+
+// Create implements client.Client.
+func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Create(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot create using only metadata")
+	default:
+		return c.typedClient.Create(ctx, obj, opts...)
+	}
+}
+
+// Update implements client.Client.
+func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Update(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
+	default:
+		return c.typedClient.Update(ctx, obj, opts...)
+	}
+}
+
+// Delete implements client.Client.
+func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Delete(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Delete(ctx, obj, opts...)
+	default:
+		return c.typedClient.Delete(ctx, obj, opts...)
+	}
+}
+
+// DeleteAllOf implements client.Client.
+func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
+	default:
+		return c.typedClient.DeleteAllOf(ctx, obj, opts...)
+	}
+}
+
+// Patch implements client.Client.
+func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Patch(ctx, obj, patch, opts...)
+	default:
+		return c.typedClient.Patch(ctx, obj, patch, opts...)
+	}
+}
+
+// Get implements client.Client.
+func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Get(ctx, key, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		// Metadata only object should always preserve the GVK coming in from the caller.
+		defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+		return c.metadataClient.Get(ctx, key, obj, opts...)
+	default:
+		return c.typedClient.Get(ctx, key, obj, opts...)
+	}
+}
+
+// List implements client.Client.
+func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	switch x := obj.(type) {
+	case *unstructured.UnstructuredList:
+		return c.unstructuredClient.List(ctx, obj, opts...)
+ case *metav1.PartialObjectMetadataList: + // Metadata only object should always preserve the GVK. + gvk := obj.GetObjectKind().GroupVersionKind() + defer c.resetGroupVersionKind(obj, gvk) + + // Call the list client. + if err := c.metadataClient.List(ctx, obj, opts...); err != nil { + return err + } + + // Restore the GVK for each item in the list. + itemGVK := schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + Kind: strings.TrimSuffix(gvk.Kind, "List"), + } + for i := range x.Items { + item := &x.Items[i] + item.SetGroupVersionKind(itemGVK) + } + + return nil + default: + return c.typedClient.List(ctx, obj, opts...) + } +} + +// Status implements client.StatusClient. +func (c *client) Status() StatusWriter { + return &statusWriter{client: c} +} + +// statusWriter is client.StatusWriter that writes status subresource. +type statusWriter struct { + client *client +} + +// ensure statusWriter implements client.StatusWriter. +var _ StatusWriter = &statusWriter{} + +// Update implements client.StatusWriter. +func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) + } +} + +// Patch implements client.Client. +func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + default: + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) + } +} diff --git a/pkg/client/client_cache.go b/pkg/client/client_cache.go new file mode 100644 index 0000000000..857a0b38a7 --- /dev/null +++ b/pkg/client/client_cache.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types. 
+type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // structuredResourceByType caches structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType caches unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + isUnstructured = isUnstructured || isUnstructuredList + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + resourceByType := c.structuredResourceByType + if isUnstructured { + resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured) + if err != nil { + return nil, err + } + resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state. +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced. +func (r *resourceMeta) isNamespaced() bool { + return r.mapping.Scope.Name() != meta.RESTScopeNameRoot +} + +// resource returns the resource name of the type. +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type. 
+type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + // Object contains meta data for the object instance + metav1.Object +} diff --git a/pkg/client/client_suite_test.go b/pkg/client/client_suite_test.go new file mode 100644 index 0000000000..c7ed32e7bc --- /dev/null +++ b/pkg/client/client_suite_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/examples/crd/pkg" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Client Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{CRDDirectoryPaths: []string{"./testdata"}} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + Expect(pkg.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go new file mode 100644 index 0000000000..a43fe87784 --- /dev/null +++ b/pkg/client/client_test.go @@ -0,0 +1,3444 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + kscheme "k8s.io/client-go/kubernetes/scheme" + + "sigs.k8s.io/controller-runtime/examples/crd/pkg" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const serverSideTimeoutSeconds = 10 + +func deleteDeployment(ctx context.Context, dep *appsv1.Deployment, ns string) { + _, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + if err == nil { + err = clientset.AppsV1().Deployments(ns).Delete(ctx, dep.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + } +} + +func deleteNamespace(ctx context.Context, ns *corev1.Namespace) { + ns, err := clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + err = clientset.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // finalize if necessary + pos := -1 + finalizers := ns.Spec.Finalizers + for i, fin := range finalizers { + if fin == "kubernetes" { + pos = i + break + } + } + if pos == -1 { + // no need to finalize + return + } + + // re-get in order to finalize + ns, err = clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + ns.Spec.Finalizers = append(finalizers[:pos], finalizers[pos+1:]...) + _, err = clientset.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + +WAIT_LOOP: + for i := 0; i < 10; i++ { + ns, err = clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // success! + return + } + select { + case <-ctx.Done(): + break WAIT_LOOP + // failed to delete in time, see failure below + case <-time.After(100 * time.Millisecond): + // do nothing, try again + } + } + Fail(fmt.Sprintf("timed out waiting for namespace %q to be deleted", ns.Name)) +} + +type mockPatchOption struct { + applied bool +} + +func (o *mockPatchOption) ApplyToPatch(_ *client.PatchOptions) { + o.applied = true +} + +// metaOnlyFromObj returns PartialObjectMetadata from a concrete Go struct that +// returns a concrete *metav1.ObjectMeta from GetObjectMeta (yes, that plays a +// bit fast and loose, but the only other options are serializing and then +// deserializing, or manually calling all the accessor funcs, which are both a bit annoying). 
+func metaOnlyFromObj(obj interface { + runtime.Object + metav1.ObjectMetaAccessor +}, scheme *runtime.Scheme) *metav1.PartialObjectMetadata { + metaObj := metav1.PartialObjectMetadata{} + obj.GetObjectMeta().(*metav1.ObjectMeta).DeepCopyInto(&metaObj.ObjectMeta) + kinds, _, err := scheme.ObjectKinds(obj) + if err != nil { + panic(err) + } + metaObj.SetGroupVersionKind(kinds[0]) + return &metaObj +} + +var _ = Describe("Client", func() { + + var scheme *runtime.Scheme + var depGvk schema.GroupVersionKind + var dep *appsv1.Deployment + var pod *corev1.Pod + var node *corev1.Node + var count uint64 = 0 + var replicaCount int32 = 2 + var ns = "default" + ctx := context.TODO() + + BeforeEach(func() { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("deployment-name-%v", count), Namespace: ns, Labels: map[string]string{"app": fmt.Sprintf("bar-%v", count)}}, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depGvk = schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + } + // Pod is invalid without a container field in the PodSpec + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("pod-%v", count), Namespace: ns}, + Spec: corev1.PodSpec{}, + } + node = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("node-name-%v", count)}, + Spec: corev1.NodeSpec{}, + } + scheme = kscheme.Scheme + }, serverSideTimeoutSeconds) + + var delOptions *metav1.DeleteOptions + AfterEach(func() { + // Cleanup + var zero int64 = 0 + policy := metav1.DeletePropagationForeground + delOptions = &metav1.DeleteOptions{ + GracePeriodSeconds: &zero, + PropagationPolicy: &policy, + } + deleteDeployment(ctx, dep, ns) + _, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + if err == nil { + err = clientset.CoreV1().Nodes().Delete(ctx, node.Name, *delOptions) + Expect(err).NotTo(HaveOccurred()) + } + }, serverSideTimeoutSeconds) + + // TODO(seans): Cast "cl" as "client" struct from "Client" interface. Then validate the + // instance values for the "client" struct. 
+ Describe("New", func() { + It("should return a new Client", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + }) + + It("should fail if the config is nil", func() { + cl, err := client.New(nil, client.Options{}) + Expect(err).To(HaveOccurred()) + Expect(cl).To(BeNil()) + }) + + // TODO(seans): cast as client struct and inspect Scheme + It("should use the provided Scheme if provided", func() { + cl, err := client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + }) + + // TODO(seans): cast as client struct and inspect Scheme + It("should default the Scheme if not provided", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + }) + + PIt("should use the provided Mapper if provided", func() { + + }) + + // TODO(seans): cast as client struct and inspect Mapper + It("should create a Mapper if not provided", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + }) + }) + + Describe("Create", func() { + Context("with structured objects", func() { + It("should create a new object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("creating the object") + err = cl.Create(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("writing the result back to the go struct") + Expect(dep).To(Equal(actual)) + }) + + It("should create a new object non-namespace object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("creating the object") + err = cl.Create(context.TODO(), node) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("writing the result back to the go struct") + Expect(node).To(Equal(actual)) + }) + + It("should fail if the object already exists", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + old := dep.DeepCopy() + + By("creating the object") + err = cl.Create(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("creating the object a second time") + err = cl.Create(context.TODO(), old) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) + }) + + It("should fail if the object does not pass server-side validation", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("creating the pod, since required field Containers is empty") + err = cl.Create(context.TODO(), pod) + Expect(err).To(HaveOccurred()) + // TODO(seans): Add test to validate the returned error. Problems currently with + // different returned error locally versus travis. 
+ }, serverSideTimeoutSeconds) + + It("should fail if the object cannot be mapped to a GVK", func() { + By("creating client with empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("creating the object fails") + err = cl.Create(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + // TODO(seans3): implement these + // Example: ListOptions + }) + + Context("with the DryRun option", func() { + It("should not create a new object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("creating the object (with DryRun)") + err = cl.Create(context.TODO(), dep, client.DryRunAll) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + Expect(actual).To(Equal(&appsv1.Deployment{})) + }) + }) + }) + + Context("with unstructured objects", func() { + It("should create a new object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("encoding the deployment as unstructured") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + + By("creating the object") + err = cl.Create(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + }) + + It("should create a new non-namespace object ", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("encoding the deployment as unstructured") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + + By("creating the object") + err = cl.Create(context.TODO(), node) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + au := &unstructured.Unstructured{} + Expect(scheme.Convert(actual, au, nil)).To(Succeed()) + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + By("writing the result back to the go struct") + + Expect(u).To(Equal(au)) + }) + + It("should fail if the object already exists", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + old := dep.DeepCopy() + + By("creating the object") + err = cl.Create(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("encoding the deployment as unstructured") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(old, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: 
"Deployment", + Version: "v1", + }) + + By("creating the object a second time") + err = cl.Create(context.TODO(), u) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) + }) + + It("should fail if the object does not pass server-side validation", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("creating the pod, since required field Containers is empty") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(pod, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + err = cl.Create(context.TODO(), u) + Expect(err).To(HaveOccurred()) + // TODO(seans): Add test to validate the returned error. Problems currently with + // different returned error locally versus travis. + }, serverSideTimeoutSeconds) + + }) + + Context("with metadata objects", func() { + It("should fail with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Create(context.TODO(), obj)).NotTo(Succeed()) + }) + }) + + Context("with the DryRun option", func() { + It("should not create a new object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("encoding the deployment as unstructured") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + + By("creating the object") + err = cl.Create(context.TODO(), u, client.DryRunAll) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + Expect(actual).To(Equal(&appsv1.Deployment{})) + }) + }) + }) + + Describe("Update", func() { + Context("with structured objects", func() { + It("should update an existing object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + dep.Annotations = map[string]string{"foo": "bar"} + err = cl.Update(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has new annotation") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should update and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + dep.SetGroupVersionKind(depGvk) + err = cl.Update(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(dep.GroupVersionKind()).To(Equal(depGvk)) + }) + + It("should update an existing object non-namespace object from a go 
struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the object") + node.Annotations = map[string]string{"foo": "bar"} + err = cl.Update(context.TODO(), node) + Expect(err).NotTo(HaveOccurred()) + + By("validate updated Node had new annotation") + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("updating non-existent object") + err = cl.Update(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object does not pass server-side validation", func() { + + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + It("should fail if the object cannot be mapped to a GVK", func() { + By("creating client with empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + dep.Annotations = map[string]string{"foo": "bar"} + err = cl.Update(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + }) + Context("with unstructured objects", func() { + It("should update an existing object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + u.SetAnnotations(map[string]string{"foo": "bar"}) + err = cl.Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has new annotation") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should update and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(depGvk) + u.SetAnnotations(map[string]string{"foo": "bar"}) + err = cl.Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + 
By("validating updated Deployment has type information") + Expect(u.GroupVersionKind()).To(Equal(depGvk)) + }) + + It("should update an existing object non-namespace object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the object") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + u.SetAnnotations(map[string]string{"foo": "bar"}) + err = cl.Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validate updated Node had new annotation") + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("updating non-existent object") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(depGvk) + err = cl.Update(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with metadata objects", func() { + It("should fail with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + + Expect(cl.Update(context.TODO(), obj)).NotTo(Succeed()) + }) + }) + }) + + Describe("Patch", func() { + Context("Metadata Client", func() { + It("should merge patch with options", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + metadata := metaOnlyFromObj(dep, scheme) + if metadata.Labels == nil { + metadata.Labels = make(map[string]string) + } + metadata.Labels["foo"] = "bar" + + testOption := &mockPatchOption{} + Expect(cl.Patch(context.TODO(), metadata, client.Merge, testOption)).To(Succeed()) + + By("validating that patched metadata has new labels") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Labels["foo"]).To(Equal("bar")) + + By("validating patch options were applied") + Expect(testOption.applied).To(Equal(true)) + }) + }) + }) + + Describe("StatusClient", func() { + Context("with structured objects", func() { + It("should update status of an existing object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the status of Deployment") + dep.Status.Replicas = 1 + err = cl.Status().Update(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + 
Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + }) + + It("should update status and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the status of Deployment") + dep.SetGroupVersionKind(depGvk) + dep.Status.Replicas = 1 + err = cl.Status().Update(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(dep.GroupVersionKind()).To(Equal(depGvk)) + }) + + It("should patch status and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("patching the status of Deployment") + dep.SetGroupVersionKind(depGvk) + depPatch := client.MergeFrom(dep.DeepCopy()) + dep.Status.Replicas = 1 + err = cl.Status().Patch(context.TODO(), dep, depPatch) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(dep.GroupVersionKind()).To(Equal(depGvk)) + }) + + It("should not update spec of an existing object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the spec and status of Deployment") + var rc int32 = 1 + dep.Status.Replicas = 1 + dep.Spec.Replicas = &rc + err = cl.Status().Update(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has new status and unchanged spec") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + Expect(*actual.Spec.Replicas).To(BeEquivalentTo(replicaCount)) + }) + + It("should update an existing object non-namespace object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating status of the object") + node.Status.Phase = corev1.NodeRunning + err = cl.Status().Update(context.TODO(), node) + Expect(err).NotTo(HaveOccurred()) + + By("validate updated Node had new annotation") + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Phase).To(Equal(corev1.NodeRunning)) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("updating status of a non-existent object") + err = cl.Status().Update(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + }) + + It("should fail if the object cannot be mapped to a GVK", func() { + By("creating client with empty Scheme") + emptyScheme := 
runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating status of the Deployment") + dep.Status.Replicas = 1 + err = cl.Status().Update(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + PIt("should fail if an API does not implement Status subresource", func() { + + }) + }) + + Context("with unstructured objects", func() { + It("should update status of an existing object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the status of Deployment") + u := &unstructured.Unstructured{} + dep.Status.Replicas = 1 + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + }) + + It("should update status and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the status of Deployment") + u := &unstructured.Unstructured{} + dep.Status.Replicas = 1 + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(u.GroupVersionKind()).To(Equal(depGvk)) + }) + + It("should patch status and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("patching the status of Deployment") + u := &unstructured.Unstructured{} + depPatch := client.MergeFrom(dep.DeepCopy()) + dep.Status.Replicas = 1 + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Patch(context.TODO(), u, depPatch) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(u.GroupVersionKind()).To(Equal(depGvk)) + + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + }) + + It("should not update spec of an existing object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + 
By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the spec and status of Deployment") + u := &unstructured.Unstructured{} + var rc int32 = 1 + dep.Status.Replicas = 1 + dep.Spec.Replicas = &rc + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has new status and unchanged spec") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + Expect(*actual.Spec.Replicas).To(BeEquivalentTo(replicaCount)) + }) + + It("should update an existing object non-namespace object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating status of the object") + u := &unstructured.Unstructured{} + node.Status.Phase = corev1.NodeRunning + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + err = cl.Status().Update(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validate updated Node had new annotation") + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Phase).To(Equal(corev1.NodeRunning)) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("updating status of a non-existent object") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(context.TODO(), u) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + PIt("should fail if an API does not implement Status subresource", func() { + + }) + + }) + + Context("with metadata objects", func() { + It("should fail to update with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Status().Update(context.TODO(), obj)).NotTo(Succeed()) + }) + + It("should patch status and preserve type information", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("patching the status of Deployment") + objPatch := client.MergeFrom(metaOnlyFromObj(dep, scheme)) + dep.Annotations = map[string]string{"some-new-annotation": "some-new-value"} + obj := metaOnlyFromObj(dep, scheme) + err = cl.Status().Patch(context.TODO(), obj, objPatch) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(obj.GroupVersionKind()).To(Equal(depGvk)) + + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + 
Expect(actual.Annotations).To(HaveKeyWithValue("some-new-annotation", "some-new-value")) + }) + }) + }) + + Describe("Delete", func() { + Context("with structured objects", func() { + It("should delete an existing object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + depName := dep.Name + err = cl.Delete(context.TODO(), dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should delete an existing object non-namespace object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Node") + nodeName := node.Name + err = cl.Delete(context.TODO(), node) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Node no longer exists") + _, err = clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Deleting node before it is ever created") + err = cl.Delete(context.TODO(), node) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + It("should fail if the object cannot be mapped to a GVK", func() { + By("creating client with empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment fails") + err = cl.Delete(context.TODO(), dep) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + It("should delete a collection of objects", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating two Deployments") + + dep2 := dep.DeepCopy() + dep2.Name += "-2" + + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + depName := dep.Name + dep2Name := dep2.Name + + By("deleting Deployments") + err = cl.DeleteAllOf(context.TODO(), dep, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, 
dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with unstructured objects", func() { + It("should delete an existing object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + depName := dep.Name + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + err = cl.Delete(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should delete an existing non-namespace object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Node") + nodeName := node.Name + u := &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + err = cl.Delete(context.TODO(), u) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Node no longer exists") + _, err = clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Deleting node before it is ever created") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + err = cl.Delete(context.TODO(), u) + Expect(err).To(HaveOccurred()) + }) + + It("should delete a collection of objects", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating two Deployments") + + dep2 := dep.DeepCopy() + dep2.Name += "-2" + + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + depName := dep.Name + dep2Name := dep2.Name + + By("deleting Deployments") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + err = cl.DeleteAllOf(context.TODO(), u, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with
metadata objects", func() { + It("should delete an existing object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + metaObj := metaOnlyFromObj(dep, scheme) + err = cl.Delete(context.TODO(), metaObj) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should delete an existing object non-namespace object from a go struct", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Node") + metaObj := metaOnlyFromObj(node, scheme) + err = cl.Delete(context.TODO(), metaObj) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Node no longer exists") + _, err = clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Deleting node before it is ever created") + metaObj := metaOnlyFromObj(node, scheme) + err = cl.Delete(context.TODO(), metaObj) + Expect(err).To(HaveOccurred()) + }) + + It("should delete a collection of object", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating two Deployments") + + dep2 := dep.DeepCopy() + dep2.Name += "-2" + + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + depName := dep.Name + dep2Name := dep2.Name + + By("deleting Deployments") + metaObj := metaOnlyFromObj(dep, scheme) + err = cl.DeleteAllOf(context.TODO(), metaObj, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + }) + }) + + Describe("Get", func() { + Context("with structured objects", func() { + It("should fetch an existing object for a go struct", func() { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Deployment") + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched deployment equals the created one") + 
Expect(dep).To(Equal(&actual)) + }) + + It("should fetch an existing non-namespace object for a go struct", func() { + By("first creating the object") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("retrieving node through client") + var actual corev1.Node + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + Expect(node).To(Equal(&actual)) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + var actual appsv1.Deployment + err = cl.Get(context.TODO(), key, &actual) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + It("should fail if the object cannot be mapped to a GVK", func() { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a client with an empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Deployment fails") + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + // Test this with an integrated type and a CRD to make sure it covers both proto + // and json deserialization. 
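+ // The loop below exercises a built-in type (ConfigMap) and a CRD (ChaosPod), so both the
+ // proto and JSON deserialization paths mentioned above are covered. Pre-setting a finalizer
+ // on the destination object and then asserting equality with the created object verifies
+ // that Get fully overwrites stale local state rather than merging it with the server's view.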
+ for idx, object := range []client.Object{&corev1.ConfigMap{}, &pkg.ChaosPod{}} { + idx, object := idx, object + It(fmt.Sprintf("should not retain any data in the obj variable that is not on the server for %T", object), func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + object.SetName(fmt.Sprintf("retain-test-%d", idx)) + object.SetNamespace(ns) + + By("First creating the object") + toCreate := object.DeepCopyObject().(client.Object) + Expect(cl.Create(ctx, toCreate)).NotTo(HaveOccurred()) + + By("Fetching it into a variable that has finalizers set") + toGetInto := object.DeepCopyObject().(client.Object) + toGetInto.SetFinalizers([]string{"some-finalizer"}) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(object), toGetInto)).NotTo(HaveOccurred()) + + By("Ensuring the created and the received object are equal") + Expect(toCreate).Should(Equal(toGetInto)) + }) + } + + }) + + Context("with unstructured objects", func() { + It("should fetch an existing object", func() { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("encoding the Deployment as unstructured") + var u runtime.Unstructured = &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + + By("fetching the created Deployment") + var actual unstructured.Unstructured + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched Deployment equals the created one") + Expect(u).To(Equal(&actual)) + }) + + It("should fetch an existing non-namespace object", func() { + By("first creating the Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("encoding the Node as unstructured") + var u runtime.Unstructured = &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Node") + var actual unstructured.Unstructured + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched Node equals the created one") + Expect(u).To(Equal(&actual)) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + u := &unstructured.Unstructured{} + err = cl.Get(context.TODO(), key, u) + Expect(err).To(HaveOccurred()) + }) + + It("should not retain any data in the obj variable that is not on the server", func() { + object := &unstructured.Unstructured{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + 
object.SetName("retain-unstructured") + object.SetNamespace(ns) + object.SetAPIVersion("chaosapps.metamagical.io/v1") + object.SetKind("ChaosPod") + + By("First creating the object") + toCreate := object.DeepCopyObject().(client.Object) + Expect(cl.Create(ctx, toCreate)).NotTo(HaveOccurred()) + + By("Fetching it into a variable that has finalizers set") + toGetInto := object.DeepCopyObject().(client.Object) + toGetInto.SetFinalizers([]string{"some-finalizer"}) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(object), toGetInto)).NotTo(HaveOccurred()) + + By("Ensuring the created and the received object are equal") + Expect(toCreate).Should(Equal(toGetInto)) + }) + }) + Context("with metadata objects", func() { + It("should fetch an existing object for a go struct", func() { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Deployment") + var actual metav1.PartialObjectMetadata + gvk := schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + } + actual.SetGroupVersionKind(gvk) + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating that the GVK has been preserved") + Expect(actual.GroupVersionKind()).To(Equal(gvk)) + + By("validating the fetched deployment equals the created one") + Expect(metaOnlyFromObj(dep, scheme)).To(Equal(&actual)) + }) + + It("should fetch an existing non-namespace object for a go struct", func() { + By("first creating the object") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("retrieving node through client") + var actual metav1.PartialObjectMetadata + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Version: "v1", + Kind: "Node", + }) + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + Expect(metaOnlyFromObj(node, scheme)).To(Equal(&actual)) + }) + + It("should fail if the object does not exist", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + var actual metav1.PartialObjectMetadata + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + }) + err = cl.Get(context.TODO(), key, &actual) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + It("should not retain any data in the obj variable that is not on the server", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("First creating the object") + toCreate := &pkg.ChaosPod{ObjectMeta: metav1.ObjectMeta{Name: "retain-metadata", Namespace: ns}} + Expect(cl.Create(ctx, toCreate)).NotTo(HaveOccurred()) + + By("Fetching it into a variable that has 
finalizers set") + toGetInto := &metav1.PartialObjectMetadata{ + TypeMeta: metav1.TypeMeta{APIVersion: "chaosapps.metamagical.io/v1", Kind: "ChaosPod"}, + ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "retain-metadata"}, + } + toGetInto.SetFinalizers([]string{"some-finalizer"}) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(toGetInto), toGetInto)).NotTo(HaveOccurred()) + + By("Ensuring the created and the received objects metadata are equal") + Expect(toCreate.ObjectMeta).Should(Equal(toGetInto.ObjectMeta)) + }) + }) + }) + + Describe("List", func() { + Context("with structured objects", func() { + It("should fetch collection of objects", func() { + By("creating an initial object") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &appsv1.DeploymentList{} + Expect(cl.List(context.Background(), deps)).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + if item.Name == dep.Name && item.Namespace == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }, serverSideTimeoutSeconds) + + It("should fetch unstructured collection of objects", func() { + By("create an initial object") + _, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(context.Background(), deps) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + Expect(item.GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + })) + if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }, serverSideTimeoutSeconds) + + It("should fetch unstructured collection of objects, even if scheme is empty", func() { + By("create an initial object") + _, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(context.Background(), deps) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }, serverSideTimeoutSeconds) + + It("should return an empty list if there are no matching objects", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in the cluster") + deps := &appsv1.DeploymentList{} + Expect(cl.List(context.Background(), deps)).NotTo(HaveOccurred()) + 
+ By("validating no Deployments are returned") + Expect(deps.Items).To(BeEmpty()) + }, serverSideTimeoutSeconds) + + // TODO(seans): get label selector test working + It("should filter results by label selector", func() { + By("creating a Deployment with the app=frontend label") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: ns, + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with the app=backend label") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: ns, + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with label app=backend") + deps := &appsv1.DeploymentList{} + labels := map[string]string{"app": "backend"} + err = cl.List(context.Background(), deps, client.MatchingLabels(labels)) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend label is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector", func() { + By("creating a Deployment in test-namespace-1") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-2") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depBackend := 
&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-1") + deps := &appsv1.DeploymentList{} + err = cl.List(context.Background(), deps, client.InNamespace("test-namespace-1")) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-1 is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + + deleteDeployment(ctx, depFrontend, "test-namespace-1") + deleteDeployment(ctx, depBackend, "test-namespace-2") + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) + }, serverSideTimeoutSeconds) + + It("should filter results by field selector", func() { + By("creating a Deployment with name deployment-frontend") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with name deployment-backend") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with field metadata.name=deployment-backend") + deps := &appsv1.DeploymentList{} + err = cl.List(context.Background(), deps, + client.MatchingFields{"metadata.name": "deployment-backend"}) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend field is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector and label selector", func() { + By("creating a Deployment in test-namespace-3 with the app=frontend label") + tns3 := 
&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depFrontend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-3 with the app=backend label") + depBackend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depBackend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-4 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend4 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-4", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(ctx, depFrontend4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-3 with label app=frontend") + deps := &appsv1.DeploymentList{} + labels := map[string]string{"app": "frontend"} + err = cl.List(context.Background(), deps, + client.InNamespace("test-namespace-3"), + client.MatchingLabels(labels), + ) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-3 with label app=frontend is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + Expect(actual.Namespace).To(Equal("test-namespace-3")) + + deleteDeployment(ctx, depFrontend3, "test-namespace-3") + deleteDeployment(ctx, depBackend3, "test-namespace-3") + deleteDeployment(ctx, depFrontend4, "test-namespace-4") + deleteNamespace(ctx, tns3) + 
deleteNamespace(ctx, tns4) + }, serverSideTimeoutSeconds) + + It("should filter results using limit and continue options", func() { + + makeDeployment := func(suffix string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deployment-%s", suffix), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + } + + By("creating 4 deployments") + dep1 := makeDeployment("1") + dep1, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep1, ns) + + dep2 := makeDeployment("2") + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep2, ns) + + dep3 := makeDeployment("3") + dep3, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep3, ns) + + dep4 := makeDeployment("4") + dep4, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep4, ns) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing 1 deployment when limit=1 is used") + deps := &appsv1.DeploymentList{} + err = cl.List(context.Background(), deps, + client.Limit(1), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).To(HaveLen(1)) + Expect(deps.Continue).NotTo(BeEmpty()) + Expect(deps.Items[0].Name).To(Equal(dep1.Name)) + + continueToken := deps.Continue + + By("listing the next deployment when previous continuation token is used and limit=1") + deps = &appsv1.DeploymentList{} + err = cl.List(context.Background(), deps, + client.Limit(1), + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).To(HaveLen(1)) + Expect(deps.Continue).NotTo(BeEmpty()) + Expect(deps.Items[0].Name).To(Equal(dep2.Name)) + + continueToken = deps.Continue + + By("listing the 2 remaining deployments when previous continuation token is used without a limit") + deps = &appsv1.DeploymentList{} + err = cl.List(context.Background(), deps, + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).To(HaveLen(2)) + Expect(deps.Continue).To(BeEmpty()) + Expect(deps.Items[0].Name).To(Equal(dep3.Name)) + Expect(deps.Items[1].Name).To(Equal(dep4.Name)) + }, serverSideTimeoutSeconds) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + PIt("should fail if the object cannot be mapped to a GVK", func() { + + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + }) + + Context("with unstructured objects", func() { + It("should fetch collection of objects", func() { + By("create an initial object") + _, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + 
Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(context.Background(), deps) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }, serverSideTimeoutSeconds) + + It("should return an empty list if there are no matching objects", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + Expect(cl.List(context.Background(), deps)).NotTo(HaveOccurred()) + + By("validating no Deployments are returned") + Expect(deps.Items).To(BeEmpty()) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector", func() { + By("creating a Deployment in test-namespace-5") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-5"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-5"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err = clientset.AppsV1().Deployments("test-namespace-5").Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-6") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-6"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-6"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments("test-namespace-6").Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-5") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(context.Background(), deps, client.InNamespace("test-namespace-5")) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-5 is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.GetName()).To(Equal("deployment-frontend")) + + deleteDeployment(ctx, depFrontend, "test-namespace-5") + deleteDeployment(ctx, depBackend, "test-namespace-6") + deleteNamespace(ctx, 
tns1) + deleteNamespace(ctx, tns2) + }, serverSideTimeoutSeconds) + + It("should filter results by field selector", func() { + By("creating a Deployment with name deployment-frontend") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with name deployment-backend") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with field metadata.name=deployment-backend") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(context.Background(), deps, + client.MatchingFields{"metadata.name": "deployment-backend"}) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend field is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.GetName()).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector and label selector", func() { + By("creating a Deployment in test-namespace-7 with the app=frontend label") + tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-7"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-7", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-7").Create(ctx, depFrontend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-7 with the app=backend label") + depBackend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: "test-namespace-7", + Labels: map[string]string{"app": 
"backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-7").Create(ctx, depBackend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-8 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-8"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend4 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-8", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-8").Create(ctx, depFrontend4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-8 with label app=frontend") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + labels := map[string]string{"app": "frontend"} + err = cl.List(context.Background(), deps, + client.InNamespace("test-namespace-7"), client.MatchingLabels(labels)) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-7 with label app=frontend is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.GetName()).To(Equal("deployment-frontend")) + Expect(actual.GetNamespace()).To(Equal("test-namespace-7")) + + deleteDeployment(ctx, depFrontend3, "test-namespace-7") + deleteDeployment(ctx, depBackend3, "test-namespace-7") + deleteDeployment(ctx, depFrontend4, "test-namespace-8") + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) + }, serverSideTimeoutSeconds) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + PIt("should filter results by namespace selector", func() { + + }) + }) + Context("with metadata objects", func() { + It("should fetch collection of objects", func() { + By("creating an initial object") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + gvk := schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + } + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(gvk) + Expect(cl.List(context.Background(), metaList)).NotTo(HaveOccurred()) + + By("validating that the list GVK has been preserved") + Expect(metaList.GroupVersionKind()).To(Equal(gvk)) + + By("validating that the list has the expected 
deployment") + Expect(metaList.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range metaList.Items { + Expect(item.GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + })) + + if item.Name == dep.Name && item.Namespace == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }, serverSideTimeoutSeconds) + + It("should return an empty list if there are no matching objects", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in the cluster") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + Expect(cl.List(context.Background(), metaList)).NotTo(HaveOccurred()) + + By("validating no Deployments are returned") + Expect(metaList.Items).To(BeEmpty()) + }, serverSideTimeoutSeconds) + + // TODO(seans): get label selector test working + It("should filter results by label selector", func() { + By("creating a Deployment with the app=frontend label") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: ns, + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with the app=backend label") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: ns, + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with label app=backend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + labels := map[string]string{"app": "backend"} + err = cl.List(context.Background(), metaList, client.MatchingLabels(labels)) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend label is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector", func() { + By("creating a Deployment in test-namespace-1") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}} + _, err := 
clientset.CoreV1().Namespaces().Create(ctx, tns1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-2") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-1") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, client.InNamespace("test-namespace-1")) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-1 is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + + deleteDeployment(ctx, depFrontend, "test-namespace-1") + deleteDeployment(ctx, depBackend, "test-namespace-2") + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) + }, serverSideTimeoutSeconds) + + It("should filter results by field selector", func() { + By("creating a Deployment with name deployment-frontend") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with name deployment-backend") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: 
metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with field metadata.name=deployment-backend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.MatchingFields{"metadata.name": "deployment-backend"}) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend field is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector and label selector", func() { + By("creating a Deployment in test-namespace-3 with the app=frontend label") + tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depFrontend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-3 with the app=backend label") + depBackend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depBackend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-4 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend4 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-4", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + 
Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(ctx, depFrontend4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-3 with label app=frontend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + labels := map[string]string{"app": "frontend"} + err = cl.List(context.Background(), metaList, + client.InNamespace("test-namespace-3"), + client.MatchingLabels(labels), + ) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-3 with label app=frontend is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + Expect(actual.Namespace).To(Equal("test-namespace-3")) + + deleteDeployment(ctx, depFrontend3, "test-namespace-3") + deleteDeployment(ctx, depBackend3, "test-namespace-3") + deleteDeployment(ctx, depFrontend4, "test-namespace-4") + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) + }, serverSideTimeoutSeconds) + + It("should filter results using limit and continue options", func() { + + makeDeployment := func(suffix string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deployment-%s", suffix), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + } + + By("creating 4 deployments") + dep1 := makeDeployment("1") + dep1, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep1, ns) + + dep2 := makeDeployment("2") + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep2, ns) + + dep3 := makeDeployment("3") + dep3, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep3, ns) + + dep4 := makeDeployment("4") + dep4, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep4, ns) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing 1 deployment when limit=1 is used") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.Limit(1), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(metaList.Items).To(HaveLen(1)) + Expect(metaList.Continue).NotTo(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep1.Name)) + + continueToken := metaList.Continue + + By("listing the next deployment when 
previous continuation token is used and limit=1") + metaList = &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.Limit(1), + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(metaList.Items).To(HaveLen(1)) + Expect(metaList.Continue).NotTo(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep2.Name)) + + continueToken = metaList.Continue + + By("listing the 2 remaining deployments when previous continuation token is used without a limit") + metaList = &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(metaList.Items).To(HaveLen(2)) + Expect(metaList.Continue).To(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep3.Name)) + Expect(metaList.Items[1].Name).To(Equal(dep4.Name)) + }, serverSideTimeoutSeconds) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + PIt("should fail if the object cannot be mapped to a GVK", func() { + + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + }) + }) + + Describe("CreateOptions", func() { + It("should allow setting DryRun to 'all'", func() { + co := &client.CreateOptions{} + client.DryRunAll.ApplyToCreate(co) + all := []string{metav1.DryRunAll} + Expect(co.AsCreateOptions().DryRun).To(Equal(all)) + }) + + It("should allow setting the field manager", func() { + po := &client.CreateOptions{} + client.FieldOwner("some-owner").ApplyToCreate(po) + Expect(po.AsCreateOptions().FieldManager).To(Equal("some-owner")) + }) + + It("should produce empty metav1.CreateOptions if nil", func() { + var co *client.CreateOptions + Expect(co.AsCreateOptions()).To(Equal(&metav1.CreateOptions{})) + co = &client.CreateOptions{} + Expect(co.AsCreateOptions()).To(Equal(&metav1.CreateOptions{})) + }) + }) + + Describe("DeleteOptions", func() { + It("should allow setting GracePeriodSeconds", func() { + do := &client.DeleteOptions{} + client.GracePeriodSeconds(1).ApplyToDelete(do) + gp := int64(1) + Expect(do.AsDeleteOptions().GracePeriodSeconds).To(Equal(&gp)) + }) + + It("should allow setting Precondition", func() { + do := &client.DeleteOptions{} + pc := metav1.NewUIDPreconditions("uid") + client.Preconditions(*pc).ApplyToDelete(do) + Expect(do.AsDeleteOptions().Preconditions).To(Equal(pc)) + Expect(do.Preconditions).To(Equal(pc)) + }) + + It("should allow setting PropagationPolicy", func() { + do := &client.DeleteOptions{} + client.PropagationPolicy(metav1.DeletePropagationForeground).ApplyToDelete(do) + dp := metav1.DeletePropagationForeground + Expect(do.AsDeleteOptions().PropagationPolicy).To(Equal(&dp)) + }) + + It("should allow setting DryRun", func() { + do := &client.DeleteOptions{} + client.DryRunAll.ApplyToDelete(do) + all := []string{metav1.DryRunAll} + Expect(do.AsDeleteOptions().DryRun).To(Equal(all)) + }) + + It("should produce empty metav1.DeleteOptions if nil", func() { + var do *client.DeleteOptions + Expect(do.AsDeleteOptions()).To(Equal(&metav1.DeleteOptions{})) + do = &client.DeleteOptions{} + Expect(do.AsDeleteOptions()).To(Equal(&metav1.DeleteOptions{})) + }) + + It("should merge multiple options together", func() { + gp := int64(1) + pc := 
metav1.NewUIDPreconditions("uid") + dp := metav1.DeletePropagationForeground + do := &client.DeleteOptions{} + do.ApplyOptions([]client.DeleteOption{ + client.GracePeriodSeconds(gp), + client.Preconditions(*pc), + client.PropagationPolicy(dp), + }) + Expect(do.GracePeriodSeconds).To(Equal(&gp)) + Expect(do.Preconditions).To(Equal(pc)) + Expect(do.PropagationPolicy).To(Equal(&dp)) + }) + }) + + Describe("DeleteCollectionOptions", func() { + It("should be convertable to list options", func() { + gp := int64(1) + do := &client.DeleteAllOfOptions{} + do.ApplyOptions([]client.DeleteAllOfOption{ + client.GracePeriodSeconds(gp), + client.MatchingLabels{"foo": "bar"}, + }) + + listOpts := do.AsListOptions() + Expect(listOpts).NotTo(BeNil()) + Expect(listOpts.LabelSelector).To(Equal("foo=bar")) + }) + + It("should be convertable to delete options", func() { + gp := int64(1) + do := &client.DeleteAllOfOptions{} + do.ApplyOptions([]client.DeleteAllOfOption{ + client.GracePeriodSeconds(gp), + client.MatchingLabels{"foo": "bar"}, + }) + + deleteOpts := do.AsDeleteOptions() + Expect(deleteOpts).NotTo(BeNil()) + Expect(deleteOpts.GracePeriodSeconds).To(Equal(&gp)) + }) + }) + + Describe("GetOptions", func() { + It("should be convertable to metav1.GetOptions", func() { + o := (&client.GetOptions{}).ApplyOptions([]client.GetOption{ + &client.GetOptions{Raw: &metav1.GetOptions{ResourceVersion: "RV0"}}, + }) + mo := o.AsGetOptions() + Expect(mo).NotTo(BeNil()) + Expect(mo.ResourceVersion).To(Equal("RV0")) + }) + + It("should produce empty metav1.GetOptions if nil", func() { + var o *client.GetOptions + Expect(o.AsGetOptions()).To(Equal(&metav1.GetOptions{})) + o = &client.GetOptions{} + Expect(o.AsGetOptions()).To(Equal(&metav1.GetOptions{})) + }) + }) + + Describe("ListOptions", func() { + It("should be convertable to metav1.ListOptions", func() { + lo := (&client.ListOptions{}).ApplyOptions([]client.ListOption{ + client.MatchingFields{"field1": "bar"}, + client.InNamespace("test-namespace"), + client.MatchingLabels{"foo": "bar"}, + client.Limit(1), + client.Continue("foo"), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.LabelSelector).To(Equal("foo=bar")) + Expect(mlo.FieldSelector).To(Equal("field1=bar")) + Expect(mlo.Limit).To(Equal(int64(1))) + Expect(mlo.Continue).To(Equal("foo")) + }) + + It("should be populated by MatchingLabels", func() { + lo := &client.ListOptions{} + client.MatchingLabels{"foo": "bar"}.ApplyToList(lo) + Expect(lo).NotTo(BeNil()) + Expect(lo.LabelSelector.String()).To(Equal("foo=bar")) + }) + + It("should be populated by MatchingField", func() { + lo := &client.ListOptions{} + client.MatchingFields{"field1": "bar"}.ApplyToList(lo) + Expect(lo).NotTo(BeNil()) + Expect(lo.FieldSelector.String()).To(Equal("field1=bar")) + }) + + It("should be populated by InNamespace", func() { + lo := &client.ListOptions{} + client.InNamespace("test").ApplyToList(lo) + Expect(lo).NotTo(BeNil()) + Expect(lo.Namespace).To(Equal("test")) + }) + + It("should produce empty metav1.ListOptions if nil", func() { + var do *client.ListOptions + Expect(do.AsListOptions()).To(Equal(&metav1.ListOptions{})) + do = &client.ListOptions{} + Expect(do.AsListOptions()).To(Equal(&metav1.ListOptions{})) + }) + + It("should be populated by Limit", func() { + lo := &client.ListOptions{} + client.Limit(1).ApplyToList(lo) + Expect(lo).NotTo(BeNil()) + Expect(lo.Limit).To(Equal(int64(1))) + }) + + It("should ignore Limit when converted to metav1.ListOptions and watch is true", func() { + lo := 
&client.ListOptions{ + Raw: &metav1.ListOptions{Watch: true}, + } + lo.ApplyOptions([]client.ListOption{ + client.Limit(1), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.Limit).To(BeZero()) + }) + + It("should be populated by Continue", func() { + lo := &client.ListOptions{} + client.Continue("foo").ApplyToList(lo) + Expect(lo).NotTo(BeNil()) + Expect(lo.Continue).To(Equal("foo")) + }) + + It("should ignore Continue token when converted to metav1.ListOptions and watch is true", func() { + lo := &client.ListOptions{ + Raw: &metav1.ListOptions{Watch: true}, + } + lo.ApplyOptions([]client.ListOption{ + client.Continue("foo"), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.Continue).To(BeEmpty()) + }) + + It("should ignore both Limit and Continue token when converted to metav1.ListOptions and watch is true", func() { + lo := &client.ListOptions{ + Raw: &metav1.ListOptions{Watch: true}, + } + lo.ApplyOptions([]client.ListOption{ + client.Limit(1), + client.Continue("foo"), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.Limit).To(BeZero()) + Expect(mlo.Continue).To(BeEmpty()) + }) + }) + + Describe("UpdateOptions", func() { + It("should allow setting DryRun to 'all'", func() { + uo := &client.UpdateOptions{} + client.DryRunAll.ApplyToUpdate(uo) + all := []string{metav1.DryRunAll} + Expect(uo.AsUpdateOptions().DryRun).To(Equal(all)) + }) + + It("should allow setting the field manager", func() { + po := &client.UpdateOptions{} + client.FieldOwner("some-owner").ApplyToUpdate(po) + Expect(po.AsUpdateOptions().FieldManager).To(Equal("some-owner")) + }) + + It("should produce empty metav1.UpdateOptions if nil", func() { + var co *client.UpdateOptions + Expect(co.AsUpdateOptions()).To(Equal(&metav1.UpdateOptions{})) + co = &client.UpdateOptions{} + Expect(co.AsUpdateOptions()).To(Equal(&metav1.UpdateOptions{})) + }) + }) + + Describe("PatchOptions", func() { + It("should allow setting DryRun to 'all'", func() { + po := &client.PatchOptions{} + client.DryRunAll.ApplyToPatch(po) + all := []string{metav1.DryRunAll} + Expect(po.AsPatchOptions().DryRun).To(Equal(all)) + }) + + It("should allow setting Force to 'true'", func() { + po := &client.PatchOptions{} + client.ForceOwnership.ApplyToPatch(po) + mpo := po.AsPatchOptions() + Expect(mpo.Force).NotTo(BeNil()) + Expect(*mpo.Force).To(BeTrue()) + }) + + It("should allow setting the field manager", func() { + po := &client.PatchOptions{} + client.FieldOwner("some-owner").ApplyToPatch(po) + Expect(po.AsPatchOptions().FieldManager).To(Equal("some-owner")) + }) + + It("should produce empty metav1.PatchOptions if nil", func() { + var po *client.PatchOptions + Expect(po.AsPatchOptions()).To(Equal(&metav1.PatchOptions{})) + po = &client.PatchOptions{} + Expect(po.AsPatchOptions()).To(Equal(&metav1.PatchOptions{})) + }) + }) +}) + +var _ = Describe("DelegatingClient", func() { + Describe("Get", func() { + It("should call cache reader when structured object", func() { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + dReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cachedReader, + Client: cl, + }) + Expect(err).NotTo(HaveOccurred()) + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: "ns", Name: "name"} + Expect(dReader.Get(context.TODO(), key, &actual)).To(Succeed()) + Expect(1).To(Equal(cachedReader.Called)) + }) + + When("getting unstructured objects", 
func() { + var dep *appsv1.Deployment + + BeforeEach(func() { + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment1", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "x", Image: "x"}}}, + }, + }, + } + var err error + dep, err = clientset.AppsV1().Deployments("default").Create(context.Background(), dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(clientset.AppsV1().Deployments("default").Delete( + context.Background(), + dep.Name, + metav1.DeleteOptions{}, + )).To(Succeed()) + }) + It("should call client reader when not cached", func() { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + dReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cachedReader, + Client: cl, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.Unstructured{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + actual.SetName(dep.Name) + key := client.ObjectKey{Namespace: dep.Namespace, Name: dep.Name} + Expect(dReader.Get(context.TODO(), key, actual)).To(Succeed()) + Expect(0).To(Equal(cachedReader.Called)) + }) + It("should call cache reader when cached", func() { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + dReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cachedReader, + Client: cl, + CacheUnstructured: true, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.Unstructured{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + actual.SetName(dep.Name) + key := client.ObjectKey{Namespace: dep.Namespace, Name: dep.Name} + Expect(dReader.Get(context.TODO(), key, actual)).To(Succeed()) + Expect(1).To(Equal(cachedReader.Called)) + }) + }) + }) + Describe("List", func() { + It("should call cache reader when structured object", func() { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + dReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cachedReader, + Client: cl, + }) + Expect(err).NotTo(HaveOccurred()) + var actual appsv1.DeploymentList + Expect(dReader.List(context.Background(), &actual)).To(Succeed()) + Expect(1).To(Equal(cachedReader.Called)) + }) + + When("listing unstructured objects", func() { + It("should call client reader when not cached", func() { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + dReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cachedReader, + Client: cl, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.UnstructuredList{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + Expect(dReader.List(context.Background(), actual)).To(Succeed()) + Expect(0).To(Equal(cachedReader.Called)) + }) + It("should call cache reader when cached", func() { + 
cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + dReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cachedReader, + Client: cl, + CacheUnstructured: true, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.UnstructuredList{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + Expect(dReader.List(context.Background(), actual)).To(Succeed()) + Expect(1).To(Equal(cachedReader.Called)) + }) + }) + }) +}) + +var _ = Describe("Patch", func() { + Describe("MergeFrom", func() { + var cm *corev1.ConfigMap + + BeforeEach(func() { + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "cm", + ResourceVersion: "10", + }, + } + }) + + It("creates a merge patch with the modifications applied during the mutation", func() { + const ( + annotationKey = "test" + annotationValue = "foo" + ) + + By("creating a merge patch") + patch := client.MergeFrom(cm.DeepCopy()) + + By("returning a patch with type MergePatch") + Expect(patch.Type()).To(Equal(types.MergePatchType)) + + By("retrieving modifying the config map") + metav1.SetMetaDataAnnotation(&cm.ObjectMeta, annotationKey, annotationValue) + + By("computing the patch data") + data, err := patch.Data(cm) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data only containing the annotation change") + Expect(data).To(Equal([]byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, annotationKey, annotationValue)))) + }) + + It("creates a merge patch with the modifications applied during the mutation, using optimistic locking", func() { + const ( + annotationKey = "test" + annotationValue = "foo" + ) + + By("creating a merge patch") + patch := client.MergeFromWithOptions(cm.DeepCopy(), client.MergeFromWithOptimisticLock{}) + + By("returning a patch with type MergePatch") + Expect(patch.Type()).To(Equal(types.MergePatchType)) + + By("retrieving modifying the config map") + metav1.SetMetaDataAnnotation(&cm.ObjectMeta, annotationKey, annotationValue) + + By("computing the patch data") + data, err := patch.Data(cm) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data containing the annotation change and the resourceVersion change") + Expect(data).To(Equal([]byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"},"resourceVersion":"%s"}}`, annotationKey, annotationValue, cm.ResourceVersion)))) + }) + }) + + Describe("StrategicMergeFrom", func() { + var dep *appsv1.Deployment + + BeforeEach(func() { + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "dep", + ResourceVersion: "10", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{Containers: []corev1.Container{{ + Name: "main", + Image: "foo:v1", + }, { + Name: "sidecar", + Image: "bar:v1", + }}}, + }, + }, + } + }) + + It("creates a strategic merge patch with the modifications applied during the mutation", func() { + By("creating a strategic merge patch") + patch := client.StrategicMergeFrom(dep.DeepCopy()) + + By("returning a patch with type StrategicMergePatchType") + Expect(patch.Type()).To(Equal(types.StrategicMergePatchType)) + + By("updating the main container's image") + for i, c := range dep.Spec.Template.Spec.Containers { + if c.Name == "main" { + c.Image = "foo:v2" + } + 
dep.Spec.Template.Spec.Containers[i] = c + } + + By("computing the patch data") + data, err := patch.Data(dep) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data only containing the image change") + Expect(data).To(Equal([]byte(`{"spec":{"template":{"spec":{"$setElementOrder/containers":[{"name":"main"},` + + `{"name":"sidecar"}],"containers":[{"image":"foo:v2","name":"main"}]}}}}`))) + }) + + It("creates a strategic merge patch with the modifications applied during the mutation, using optimistic locking", func() { + By("creating a strategic merge patch") + patch := client.StrategicMergeFrom(dep.DeepCopy(), client.MergeFromWithOptimisticLock{}) + + By("returning a patch with type StrategicMergePatchType") + Expect(patch.Type()).To(Equal(types.StrategicMergePatchType)) + + By("updating the main container's image") + for i, c := range dep.Spec.Template.Spec.Containers { + if c.Name == "main" { + c.Image = "foo:v2" + } + dep.Spec.Template.Spec.Containers[i] = c + } + + By("computing the patch data") + data, err := patch.Data(dep) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data containing the image change and the resourceVersion change") + Expect(data).To(Equal([]byte(fmt.Sprintf(`{"metadata":{"resourceVersion":"%s"},`+ + `"spec":{"template":{"spec":{"$setElementOrder/containers":[{"name":"main"},{"name":"sidecar"}],"containers":[{"image":"foo:v2","name":"main"}]}}}}`, + dep.ResourceVersion)))) + }) + }) +}) + +var _ = Describe("IgnoreNotFound", func() { + It("should return nil on a 'NotFound' error", func() { + By("creating a NotFound error") + err := apierrors.NewNotFound(schema.GroupResource{}, "") + + By("returning no error") + Expect(client.IgnoreNotFound(err)).To(Succeed()) + }) + + It("should return the error on a status other than not found", func() { + By("creating a BadRequest error") + err := apierrors.NewBadRequest("") + + By("returning an error") + Expect(client.IgnoreNotFound(err)).To(HaveOccurred()) + }) + + It("should return the error on a non-status error", func() { + By("creating an fmt error") + err := fmt.Errorf("arbitrary error") + + By("returning an error") + Expect(client.IgnoreNotFound(err)).To(HaveOccurred()) + }) +}) + +var _ = Describe("IgnoreAlreadyExists", func() { + It("should return nil on a 'AlreadyExists' error", func() { + By("creating a AlreadyExists error") + err := apierrors.NewAlreadyExists(schema.GroupResource{}, "") + + By("returning no error") + Expect(client.IgnoreAlreadyExists(err)).To(Succeed()) + }) + + It("should return the error on a status other than already exists", func() { + By("creating a BadRequest error") + err := apierrors.NewBadRequest("") + + By("returning an error") + Expect(client.IgnoreAlreadyExists(err)).To(HaveOccurred()) + }) + + It("should return the error on a non-status error", func() { + By("creating an fmt error") + err := fmt.Errorf("arbitrary error") + + By("returning an error") + Expect(client.IgnoreAlreadyExists(err)).To(HaveOccurred()) + }) +}) + +type fakeReader struct { + Called int +} + +func (f *fakeReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + f.Called++ + return nil +} + +func (f *fakeReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + f.Called++ + return nil +} diff --git a/pkg/client/codec.go b/pkg/client/codec.go new file mode 100644 index 0000000000..9c2923106c --- /dev/null +++ b/pkg/client/codec.go @@ 
-0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "errors" + "net/url" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.ParameterCodec = noConversionParamCodec{} + +// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings. +// it's useful in scenarios with the unstructured client and arbitrary resources. +type noConversionParamCodec struct{} + +func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on noConversionParamCodec") +} diff --git a/pkg/client/config/config.go b/pkg/client/config/config.go new file mode 100644 index 0000000000..ff44a225fe --- /dev/null +++ b/pkg/client/config/config.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var ( + kubeconfig string + log = logf.RuntimeLog.WithName("client").WithName("config") +) + +func init() { + // TODO: Fix this to allow double vendoring this library but still register flags on behalf of users + flag.StringVar(&kubeconfig, "kubeconfig", "", + "Paths to a kubeconfig. Only required if out-of-cluster.") +} + +// GetConfig creates a *rest.Config for talking to a Kubernetes API server. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence: +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. 
+func GetConfig() (*rest.Config, error) { + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence: +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) + if err != nil { + return nil, err + } + + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + cfg.Burst = 30.0 + } + + return cfg, nil +} + +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + +// loadConfig loads a REST Config as per the rules specified in GetConfig. +func loadConfig(context string) (*rest.Config, error) { + // If a flag is specified with the config location, use that + if len(kubeconfig) > 0 { + return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) + } + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. + kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + if c, err := loadInClusterConfig(); err == nil { + return c, nil + } + } + + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. + // + // NOTE: For default config file locations, upstream only checks + // $HOME for the user's home directory, but we can also try + // os/user.HomeDir when $HOME is unset. + // + // TODO(jlanford): could this be done upstream? + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + if _, ok := os.LookupEnv("HOME"); !ok { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("could not get current user: %w", err) + } + loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) + } + + return loadConfigWithContext("", loadingRules, context) +} + +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} + +// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// Will log an error and exit if there is an error creating the rest.Config. 
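A minimal sketch of how these loaders are typically consumed from an out-of-cluster binary; the "prod" context name is only a placeholder and the error handling is illustrative:

package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Resolve the kubeconfig using the precedence described above:
	// --kubeconfig flag, then $KUBECONFIG, then in-cluster config, then $HOME/.kube/config.
	cfg, err := config.GetConfigWithContext("prod") // "prod" is a placeholder context name
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to load kubeconfig: %v\n", err)
		os.Exit(1)
	}

	// When the kubeconfig leaves them unset, QPS and Burst were defaulted to 20 and 30 above.
	cl, err := client.New(cfg, client.Options{})
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to construct client: %v\n", err)
		os.Exit(1)
	}
	_ = cl
}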
+func GetConfigOrDie() *rest.Config { + config, err := GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + return config +} diff --git a/pkg/client/config/config_suite_test.go b/pkg/client/config/config_suite_test.go new file mode 100644 index 0000000000..4d07c03c4b --- /dev/null +++ b/pkg/client/config/config_suite_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Client Config Test Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}, 60) diff --git a/pkg/client/config/config_test.go b/pkg/client/config/config_test.go new file mode 100644 index 0000000000..a1f04d9e6e --- /dev/null +++ b/pkg/client/config/config_test.go @@ -0,0 +1,234 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "os" + "path/filepath" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +type testCase struct { + text string + context string + kubeconfigFlag string + kubeconfigEnv []string + wantHost string +} + +var _ = Describe("Config", func() { + + var dir string + + origRecommendedHomeFile := clientcmd.RecommendedHomeFile + + BeforeEach(func() { + // create temporary directory for test case + var err error + dir, err = os.MkdirTemp("", "cr-test") + Expect(err).NotTo(HaveOccurred()) + + // override $HOME/.kube/config + clientcmd.RecommendedHomeFile = filepath.Join(dir, ".kubeconfig") + }) + + AfterEach(func() { + os.Unsetenv(clientcmd.RecommendedConfigPathEnvVar) + kubeconfig = "" + clientcmd.RecommendedHomeFile = origRecommendedHomeFile + + err := os.RemoveAll(dir) + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("GetConfigWithContext", func() { + defineTests := func(testCases []testCase) { + for _, testCase := range testCases { + tc := testCase + It(tc.text, func() { + // set global and environment configs + setConfigs(tc, dir) + + // run the test + cfg, err := GetConfigWithContext(tc.context) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg.Host).To(Equal(tc.wantHost)) + }) + } + } + + Context("when kubeconfig files don't exist", func() { + It("should fail", func() { + err := os.Unsetenv(clientcmd.RecommendedConfigPathEnvVar) + Expect(err).NotTo(HaveOccurred()) + + cfg, err := GetConfigWithContext("") + Expect(cfg).To(BeNil()) + Expect(err).To(HaveOccurred()) + }) + }) + + Context("when in-cluster", func() { + kubeconfigFiles := map[string]string{ + "kubeconfig-multi-context": genKubeconfig("from-multi-env-1", "from-multi-env-2"), + ".kubeconfig": genKubeconfig("from-home"), + } + BeforeEach(func() { + err := createFiles(kubeconfigFiles, dir) + Expect(err).NotTo(HaveOccurred()) + + // override in-cluster config loader + loadInClusterConfig = func() (*rest.Config, error) { + return &rest.Config{Host: "from-in-cluster"}, nil + } + }) + AfterEach(func() { loadInClusterConfig = rest.InClusterConfig }) + + testCases := []testCase{ + { + text: "should prefer the envvar over the in-cluster config", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-1", + }, + { + text: "should prefer in-cluster over the recommended home file", + wantHost: "from-in-cluster", + }, + } + defineTests(testCases) + }) + + Context("when outside the cluster", func() { + kubeconfigFiles := map[string]string{ + "kubeconfig-flag": genKubeconfig("from-flag"), + "kubeconfig-multi-context": genKubeconfig("from-multi-env-1", "from-multi-env-2"), + "kubeconfig-env-1": genKubeconfig("from-env-1"), + "kubeconfig-env-2": genKubeconfig("from-env-2"), + ".kubeconfig": genKubeconfig("from-home"), + } + BeforeEach(func() { + err := createFiles(kubeconfigFiles, dir) + Expect(err).NotTo(HaveOccurred()) + }) + testCases := []testCase{ + { + text: "should use the --kubeconfig flag", + kubeconfigFlag: "kubeconfig-flag", + wantHost: "from-flag", + }, + { + text: "should use the envvar", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-1", + }, + { + text: "should use the recommended home file", + wantHost: "from-home", + }, + { + text: "should prefer the flag over the envvar", + kubeconfigFlag: "kubeconfig-flag", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-flag", + }, + { + text: "should prefer the envvar over the recommended home file", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-1", + }, 
+ { + text: "should allow overriding the context", + context: "from-multi-env-2", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-2", + }, + { + text: "should support a multi-value envvar", + context: "from-env-2", + kubeconfigEnv: []string{"kubeconfig-env-1", "kubeconfig-env-2"}, + wantHost: "from-env-2", + }, + } + defineTests(testCases) + }) + }) +}) + +func setConfigs(tc testCase, dir string) { + // Set kubeconfig flag value + if len(tc.kubeconfigFlag) > 0 { + kubeconfig = filepath.Join(dir, tc.kubeconfigFlag) + } + + // Set KUBECONFIG env value + if len(tc.kubeconfigEnv) > 0 { + kubeconfigEnvPaths := []string{} + for _, k := range tc.kubeconfigEnv { + kubeconfigEnvPaths = append(kubeconfigEnvPaths, filepath.Join(dir, k)) + } + os.Setenv(clientcmd.RecommendedConfigPathEnvVar, strings.Join(kubeconfigEnvPaths, ":")) + } +} + +func createFiles(files map[string]string, dir string) error { + for path, data := range files { + if err := os.WriteFile(filepath.Join(dir, path), []byte(data), 0644); err != nil { //nolint:gosec + return err + } + } + return nil +} + +func genKubeconfig(contexts ...string) string { + var sb strings.Builder + sb.WriteString(`--- +apiVersion: v1 +kind: Config +clusters: +`) + for _, ctx := range contexts { + sb.WriteString(`- cluster: + server: ` + ctx + ` + name: ` + ctx + ` +`) + } + sb.WriteString("contexts:\n") + for _, ctx := range contexts { + sb.WriteString(`- context: + cluster: ` + ctx + ` + user: ` + ctx + ` + name: ` + ctx + ` +`) + } + + sb.WriteString("users:\n") + for _, ctx := range contexts { + sb.WriteString(`- name: ` + ctx + ` +`) + } + sb.WriteString("preferences: {}\n") + if len(contexts) > 0 { + sb.WriteString("current-context: " + contexts[0] + "\n") + } + + return sb.String() +} diff --git a/pkg/client/config/doc.go b/pkg/client/config/doc.go new file mode 100644 index 0000000000..796c9cf590 --- /dev/null +++ b/pkg/client/config/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains libraries for initializing REST configs for talking to the Kubernetes API +package config diff --git a/pkg/client/doc.go b/pkg/client/doc.go new file mode 100644 index 0000000000..e0e2885094 --- /dev/null +++ b/pkg/client/doc.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. 
+// +// # Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// It is a common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// # Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// # Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. +package client diff --git a/pkg/client/dryrun.go b/pkg/client/dryrun.go new file mode 100644 index 0000000000..14606a5794 --- /dev/null +++ b/pkg/client/dryrun.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +// Create implements client.Client. +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client. +func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client. +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client. 
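A short usage sketch for the dry-run wrapper, assuming reachable cluster credentials; the ConfigMap name is a placeholder:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	base, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	// Every mutating call made through dryRun has DryRunAll appended, so the API
	// server runs validation and admission but persists nothing.
	dryRun := client.NewDryRunClient(base)

	cm := &corev1.ConfigMap{}
	cm.Name = "dry-run-check" // placeholder object; it is never actually created
	cm.Namespace = "default"
	if err := dryRun.Create(context.Background(), cm); err != nil {
		panic(err)
	}
}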
+func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client. +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +// Get implements client.Client. +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + return c.client.Get(ctx, key, obj, opts...) +} + +// List implements client.Client. +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter. +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter. +func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter. +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} diff --git a/pkg/client/dryrun_test.go b/pkg/client/dryrun_test.go new file mode 100644 index 0000000000..0a46e5617d --- /dev/null +++ b/pkg/client/dryrun_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "fmt" + "sync/atomic" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("DryRunClient", func() { + var dep *appsv1.Deployment + var count uint64 = 0 + var replicaCount int32 = 2 + var ns = "default" + ctx := context.Background() + + getClient := func() client.Client { + nonDryRunClient, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nonDryRunClient).NotTo(BeNil()) + return client.NewDryRunClient(nonDryRunClient) + } + + BeforeEach(func() { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("dry-run-deployment-%v", count), + Namespace: ns, + Labels: map[string]string{"name": fmt.Sprintf("dry-run-deployment-%v", count)}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully Get an object", func() { + name := types.NamespacedName{Namespace: ns, Name: dep.Name} + result := &appsv1.Deployment{} + + Expect(getClient().Get(ctx, name, result)).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo(dep)) + }) + + It("should successfully List objects", func() { + result := &appsv1.DeploymentList{} + opts := client.MatchingLabels(dep.Labels) + + Expect(getClient().List(ctx, result, opts)).NotTo(HaveOccurred()) + + Expect(len(result.Items)).To(BeEquivalentTo(1)) + Expect(result.Items[0]).To(BeEquivalentTo(*dep)) + }) + + It("should not create an object", func() { + newDep := dep.DeepCopy() + newDep.Name = "new-deployment" + + Expect(getClient().Create(ctx, newDep)).ToNot(HaveOccurred()) + + _, err := clientset.AppsV1().Deployments(ns).Get(ctx, newDep.Name, metav1.GetOptions{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should not create an object with opts", func() { + newDep := dep.DeepCopy() + newDep.Name = "new-deployment" + opts := &client.CreateOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Create(ctx, newDep, opts)).ToNot(HaveOccurred()) + + _, err := clientset.AppsV1().Deployments(ns).Get(ctx, newDep.Name, metav1.GetOptions{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should refuse a create request for an invalid object", func() { + changedDep := dep.DeepCopy() + changedDep.Spec.Template.Spec.Containers = nil + + err := getClient().Create(ctx, changedDep) + Expect(apierrors.IsInvalid(err)).To(BeTrue()) + }) + + It("should not change objects via update", func() { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + + Expect(getClient().Update(ctx, changedDep)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via update with opts", func() { + changedDep := dep.DeepCopy() 
+ *changedDep.Spec.Replicas = 2 + opts := &client.UpdateOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Update(ctx, changedDep, opts)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should refuse an update request for an invalid change", func() { + changedDep := dep.DeepCopy() + changedDep.Spec.Template.Spec.Containers = nil + + err := getClient().Update(ctx, changedDep) + Expect(apierrors.IsInvalid(err)).To(BeTrue()) + }) + + It("should not change objects via patch", func() { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + + Expect(getClient().Patch(ctx, changedDep, client.MergeFrom(dep))).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via patch with opts", func() { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + opts := &client.PatchOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Patch(ctx, changedDep, client.MergeFrom(dep), opts)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not delete objects", func() { + Expect(getClient().Delete(ctx, dep)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not delete objects with opts", func() { + opts := &client.DeleteOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Delete(ctx, dep, opts)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not delete objects via deleteAllOf", func() { + opts := []client.DeleteAllOfOption{client.InNamespace(ns), client.MatchingLabels(dep.Labels)} + + Expect(getClient().DeleteAllOf(ctx, dep, opts...)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via update status", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Update(ctx, changedDep)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via update status with opts", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + opts := &client.UpdateOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Status().Update(ctx, changedDep, opts)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + 
Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via status patch", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Patch(ctx, changedDep, client.MergeFrom(dep))).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via status patch with opts", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + opts := &client.PatchOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Status().Patch(ctx, changedDep, client.MergeFrom(dep), opts)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) +}) diff --git a/pkg/client/example_test.go b/pkg/client/example_test.go new file mode 100644 index 0000000000..c69caabcd2 --- /dev/null +++ b/pkg/client/example_test.go @@ -0,0 +1,268 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "fmt" + "os" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +var ( + c client.Client + someIndexer client.FieldIndexer +) + +func ExampleNew() { + cl, err := client.New(config.GetConfigOrDie(), client.Options{}) + if err != nil { + fmt.Println("failed to create client") + os.Exit(1) + } + + podList := &corev1.PodList{} + + err = cl.List(context.Background(), podList, client.InNamespace("default")) + if err != nil { + fmt.Printf("failed to list pods in namespace default: %v\n", err) + os.Exit(1) + } +} + +// This example shows how to use the client with typed and unstructured objects to retrieve an object. +func ExampleClient_get() { + // Using a typed object. + pod := &corev1.Pod{} + // c is a created client. + _ = c.Get(context.Background(), client.ObjectKey{ + Namespace: "namespace", + Name: "name", + }, pod) + + // Using a unstructured object. + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + _ = c.Get(context.Background(), client.ObjectKey{ + Namespace: "namespace", + Name: "name", + }, u) +} + +// This example shows how to use the client with typed and unstructured objects to create objects. +func ExampleClient_create() { + // Using a typed object. 
+ pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "name", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "nginx", + Name: "nginx", + }, + }, + }, + } + // c is a created client. + _ = c.Create(context.Background(), pod) + + // Using a unstructured object. + u := &unstructured.Unstructured{} + u.Object = map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "name", + "namespace": "namespace", + }, + "spec": map[string]interface{}{ + "replicas": 2, + "selector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "foo": "bar", + }, + }, + "template": map[string]interface{}{ + "labels": map[string]interface{}{ + "foo": "bar", + }, + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "nginx", + "image": "nginx", + }, + }, + }, + }, + }, + } + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + _ = c.Create(context.Background(), u) +} + +// This example shows how to use the client with typed and unstructured objects to list objects. +func ExampleClient_list() { + // Using a typed object. + pod := &corev1.PodList{} + // c is a created client. + _ = c.List(context.Background(), pod) + + // Using a unstructured object. + u := &unstructured.UnstructuredList{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + _ = c.List(context.Background(), u) +} + +// This example shows how to use the client with typed and unstructured objects to update objects. +func ExampleClient_update() { + // Using a typed object. + pod := &corev1.Pod{} + // c is a created client. + _ = c.Get(context.Background(), client.ObjectKey{ + Namespace: "namespace", + Name: "name", + }, pod) + pod.SetFinalizers(append(pod.GetFinalizers(), "new-finalizer")) + _ = c.Update(context.Background(), pod) + + // Using a unstructured object. + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + _ = c.Get(context.Background(), client.ObjectKey{ + Namespace: "namespace", + Name: "name", + }, u) + u.SetFinalizers(append(u.GetFinalizers(), "new-finalizer")) + _ = c.Update(context.Background(), u) +} + +// This example shows how to use the client with typed and unstructured objects to patch objects. +func ExampleClient_patch() { + patch := []byte(`{"metadata":{"annotations":{"version": "v2"}}}`) + _ = c.Patch(context.Background(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "name", + }, + }, client.RawPatch(types.StrategicMergePatchType, patch)) +} + +// This example shows how to use the client with typed and unstructured objects to patch objects' status. +func ExampleClient_patchStatus() { + u := &unstructured.Unstructured{} + u.Object = map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "namespace", + }, + } + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "batch", + Version: "v1beta1", + Kind: "CronJob", + }) + patch := []byte(fmt.Sprintf(`{"status":{"lastScheduleTime":"%s"}}`, time.Now().Format(time.RFC3339))) + _ = c.Status().Patch(context.Background(), u, client.RawPatch(types.MergePatchType, patch)) +} + +// This example shows how to use the client with typed and unstructured objects to delete objects. +func ExampleClient_delete() { + // Using a typed object. 
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "namespace",
+			Name:      "name",
+		},
+	}
+	// c is a created client.
+	_ = c.Delete(context.Background(), pod)
+
+	// Using an unstructured object.
+	u := &unstructured.Unstructured{}
+	u.SetName("name")
+	u.SetNamespace("namespace")
+	u.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "apps",
+		Kind:    "Deployment",
+		Version: "v1",
+	})
+	_ = c.Delete(context.Background(), u)
+}
+
+// This example shows how to use the client with typed and unstructured objects to delete collections of objects.
+func ExampleClient_deleteAllOf() {
+	// Using a typed object.
+	// c is a created client.
+	_ = c.DeleteAllOf(context.Background(), &corev1.Pod{}, client.InNamespace("foo"), client.MatchingLabels{"app": "foo"})
+
+	// Using an unstructured object.
+	u := &unstructured.Unstructured{}
+	u.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "apps",
+		Kind:    "Deployment",
+		Version: "v1",
+	})
+	_ = c.DeleteAllOf(context.Background(), u, client.InNamespace("foo"), client.MatchingLabels{"app": "foo"})
+}
+
+// This example shows how to set up and consume a field selector over a pod's volumes' secretName field.
+func ExampleFieldIndexer_secretName() {
+	// someIndexer is a FieldIndexer over a Cache
+	_ = someIndexer.IndexField(context.TODO(), &corev1.Pod{}, "spec.volumes.secret.secretName", func(o client.Object) []string {
+		var res []string
+		for _, vol := range o.(*corev1.Pod).Spec.Volumes {
+			if vol.Secret == nil {
+				continue
+			}
+			// just return the raw field value -- the indexer will take care of dealing with namespaces for us
+			res = append(res, vol.Secret.SecretName)
+		}
+		return res
+	})
+
+	// elsewhere (e.g. in your reconciler)
+	mySecretName := "someSecret" // derived from the reconcile.Request, for instance
+	var podsWithSecrets corev1.PodList
+	_ = c.List(context.Background(), &podsWithSecrets, client.MatchingFields{"spec.volumes.secret.secretName": mySecretName})
+}
diff --git a/pkg/client/fake/client.go b/pkg/client/fake/client.go
new file mode 100644
index 0000000000..b7ca2de47a
--- /dev/null
+++ b/pkg/client/fake/client.go
@@ -0,0 +1,787 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package fake + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +type versionedTracker struct { + testing.ObjectTracker + scheme *runtime.Scheme +} + +type fakeClient struct { + tracker versionedTracker + scheme *runtime.Scheme + restMapper meta.RESTMapper + schemeWriteLock sync.Mutex +} + +var _ client.WithWatch = &fakeClient{} + +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +// NewFakeClient creates a new fake client for testing. +// You can choose to initialize it with a slice of runtime.Object. +// +// Deprecated: Please use NewClientBuilder instead. +func NewFakeClient(initObjs ...runtime.Object) client.WithWatch { + return NewClientBuilder().WithRuntimeObjects(initObjs...).Build() +} + +// NewFakeClientWithScheme creates a new fake client with the given scheme +// for testing. +// You can choose to initialize it with a slice of runtime.Object. +// +// Deprecated: Please use NewClientBuilder instead. +func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.WithWatch { + return NewClientBuilder().WithScheme(clientScheme).WithRuntimeObjects(initObjs...).Build() +} + +// NewClientBuilder returns a new builder to create a fake client. +func NewClientBuilder() *ClientBuilder { + return &ClientBuilder{} +} + +// ClientBuilder builds a fake client. +type ClientBuilder struct { + scheme *runtime.Scheme + restMapper meta.RESTMapper + initObject []client.Object + initLists []client.ObjectList + initRuntimeObjects []runtime.Object + objectTracker testing.ObjectTracker +} + +// WithScheme sets this builder's internal scheme. +// If not set, defaults to client-go's global scheme.Scheme. +func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder { + f.scheme = scheme + return f +} + +// WithRESTMapper sets this builder's restMapper. +// The restMapper is directly set as mapper in the Client. This can be used for example +// with a meta.DefaultRESTMapper to provide a static rest mapping. +// If not set, defaults to an empty meta.DefaultRESTMapper. +func (f *ClientBuilder) WithRESTMapper(restMapper meta.RESTMapper) *ClientBuilder { + f.restMapper = restMapper + return f +} + +// WithObjects can be optionally used to initialize this fake client with client.Object(s). +func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder { + f.initObject = append(f.initObject, initObjs...) + return f +} + +// WithLists can be optionally used to initialize this fake client with client.ObjectList(s). +func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder { + f.initLists = append(f.initLists, initLists...) + return f +} + +// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s). 
+func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder { + f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...) + return f +} + +// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker. +func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder { + f.objectTracker = ot + return f +} + +// Build builds and returns a new fake client. +func (f *ClientBuilder) Build() client.WithWatch { + if f.scheme == nil { + f.scheme = scheme.Scheme + } + if f.restMapper == nil { + f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{}) + } + + var tracker versionedTracker + + if f.objectTracker == nil { + tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme} + } else { + tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme} + } + + for _, obj := range f.initObject { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initLists { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initRuntimeObjects { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err)) + } + } + return &fakeClient{ + tracker: tracker, + scheme: f.scheme, + restMapper: f.restMapper, + } +} + +const trackerAddResourceVersion = "999" + +func (t versionedTracker) Add(obj runtime.Object) error { + var objects []runtime.Object + if meta.IsListType(obj) { + var err error + objects, err = meta.ExtractList(obj) + if err != nil { + return err + } + } else { + objects = []runtime.Object{obj} + } + for _, obj := range objects { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetResourceVersion() == "" { + // We use a "magic" value of 999 here because this field + // is parsed as uint and and 0 is already used in Update. 
+			// As we can't go lower, go very high instead so this can
+			// be recognized
+			accessor.SetResourceVersion(trackerAddResourceVersion)
+		}
+
+		obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
+		if err != nil {
+			return err
+		}
+		if err := t.ObjectTracker.Add(obj); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return fmt.Errorf("failed to get accessor for object: %w", err)
+	}
+	if accessor.GetName() == "" {
+		return apierrors.NewInvalid(
+			obj.GetObjectKind().GroupVersionKind().GroupKind(),
+			accessor.GetName(),
+			field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
+	}
+	if accessor.GetResourceVersion() != "" {
+		return apierrors.NewBadRequest("resourceVersion can not be set for Create requests")
+	}
+	accessor.SetResourceVersion("1")
+	obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
+	if err != nil {
+		return err
+	}
+	if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil {
+		accessor.SetResourceVersion("")
+		return err
+	}
+
+	return nil
+}
+
+// convertFromUnstructuredIfNecessary will convert an *unstructured.Unstructured for a GVK that is recognized
+// by the scheme into whatever the scheme produces with New() for said GVK.
+// This is required because the tracker unconditionally saves on manipulations, but its List() implementation
+// tries to assign whatever it finds into a ListType it gets from scheme.New() - thus we have to ensure
+// we save as the very same type, otherwise subsequent List requests will fail.
+func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) {
+	u, isUnstructured := o.(*unstructured.Unstructured)
+	if !isUnstructured || !s.Recognizes(u.GroupVersionKind()) {
+		return o, nil
+	}
+
+	typed, err := s.New(u.GroupVersionKind())
+	if err != nil {
+		return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", u.GroupVersionKind().String(), err)
+	}
+
+	unstructuredSerialized, err := json.Marshal(u)
+	if err != nil {
+		return nil, fmt.Errorf("failed to serialize %T: %w", unstructuredSerialized, err)
+	}
+	if err := json.Unmarshal(unstructuredSerialized, typed); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal the content of %T into %T: %w", u, typed, err)
+	}
+
+	return typed, nil
+}
+
+func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return fmt.Errorf("failed to get accessor for object: %w", err)
+	}
+
+	if accessor.GetName() == "" {
+		return apierrors.NewInvalid(
+			obj.GetObjectKind().GroupVersionKind().GroupKind(),
+			accessor.GetName(),
+			field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	if gvk.Empty() {
+		gvk, err = apiutil.GVKForObject(obj, t.scheme)
+		if err != nil {
+			return err
+		}
+	}
+
+	oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName())
+	if err != nil {
+		// If the resource is not found and the resource allows create on update, issue a
+		// create instead.
+ if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) { + return t.Create(gvr, obj, ns) + } + return err + } + + oldAccessor, err := meta.Accessor(oldObject) + if err != nil { + return err + } + + // If the new object does not have the resource version set and it allows unconditional update, + // default it to the resource version of the existing resource + if accessor.GetResourceVersion() == "" && allowsUnconditionalUpdate(gvk) { + accessor.SetResourceVersion(oldAccessor.GetResourceVersion()) + } + if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { + return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) + } + if oldAccessor.GetResourceVersion() == "" { + oldAccessor.SetResourceVersion("0") + } + intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err) + } + intResourceVersion++ + accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) + if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 { + return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) + } + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + return t.ObjectTracker.Update(gvr, obj, ns) +} + +func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + o, err := c.tracker.Get(gvr, key.Namespace, key.Name) + if err != nil { + return err + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + zero(obj) + _, _, err = decoder.Decode(j, nil, obj) + return err +} + +func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return nil, err + } + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return c.tracker.Watch(gvr, listOpts.Namespace) +} + +func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + originalKind := gvk.Kind + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + if _, isUnstructuredList := obj.(*unstructured.UnstructuredList); isUnstructuredList && !c.scheme.Recognizes(gvk) { + // We need to register the ListKind with UnstructuredList: + // https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51 + c.schemeWriteLock.Lock() + c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{}) + c.schemeWriteLock.Unlock() + } + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, listOpts.Namespace) + if err != nil { + return err + } + + 
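+	// The tracker returns its own concrete list object here, so the code below
+	// restores the kind the caller asked for and round-trips the result through
+	// JSON to decode it back into the list object the caller passed in.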
ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(originalKind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + zero(obj) + _, _, err = decoder.Decode(j, nil, obj) + if err != nil { + return err + } + + if listOpts.LabelSelector != nil { + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector) + if err != nil { + return err + } + err = meta.SetList(obj, filteredObjs) + if err != nil { + return err + } + } + return nil +} + +func (c *fakeClient) Scheme() *runtime.Scheme { + return c.scheme +} + +func (c *fakeClient) RESTMapper() meta.RESTMapper { + return c.restMapper +} + +func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + createOptions := &client.CreateOptions{} + createOptions.ApplyOptions(opts) + + for _, dryRunOpt := range createOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + if accessor.GetName() == "" && accessor.GetGenerateName() != "" { + base := accessor.GetGenerateName() + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) + } + + return c.tracker.Create(gvr, obj, accessor.GetNamespace()) +} + +func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + delOptions := client.DeleteOptions{} + delOptions.ApplyOptions(opts) + + for _, dryRunOpt := range delOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + // Check the ResourceVersion if that Precondition was specified. + if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil { + name := accessor.GetName() + dbObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), name) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(dbObj) + if err != nil { + return err + } + actualRV := oldAccessor.GetResourceVersion() + expectRV := *delOptions.Preconditions.ResourceVersion + if actualRV != expectRV { + msg := fmt.Sprintf( + "the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). 
"+ + "The object might have been modified", + expectRV, actualRV) + return apierrors.NewConflict(gvr.GroupResource(), name, errors.New(msg)) + } + } + + return c.deleteObject(gvr, accessor) +} + +func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + dcOptions := client.DeleteAllOfOptions{} + dcOptions.ApplyOptions(opts) + + for _, dryRunOpt := range dcOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace) + if err != nil { + return err + } + + objs, err := meta.ExtractList(o) + if err != nil { + return err + } + filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector) + if err != nil { + return err + } + for _, o := range filteredObjs { + accessor, err := meta.Accessor(o) + if err != nil { + return err + } + err = c.deleteObject(gvr, accessor) + if err != nil { + return err + } + } + return nil +} + +func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + updateOptions := &client.UpdateOptions{} + updateOptions.ApplyOptions(opts) + + for _, dryRunOpt := range updateOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + return c.tracker.Update(gvr, obj, accessor.GetNamespace()) +} + +func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + patchOptions := &client.PatchOptions{} + patchOptions.ApplyOptions(opts) + + for _, dryRunOpt := range patchOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + data, err := patch.Data(obj) + if err != nil { + return err + } + + reaction := testing.ObjectReaction(c.tracker) + handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data)) + if err != nil { + return err + } + if !handled { + panic("tracker could not handle patch method") + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + zero(obj) + _, _, err = decoder.Decode(j, nil, obj) + return err +} + +func (c *fakeClient) Status() client.StatusWriter { + return &fakeStatusWriter{client: c} +} + +func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error { + old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err == nil { + oldAccessor, err := meta.Accessor(old) + if err == nil { + if len(oldAccessor.GetFinalizers()) > 0 { + now := metav1.Now() + oldAccessor.SetDeletionTimestamp(&now) + return c.tracker.Update(gvr, old, accessor.GetNamespace()) + } + } + } + + //TODO: implement propagation + return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) +} + +func getGVRFromObject(obj runtime.Object, scheme 
*runtime.Scheme) (schema.GroupVersionResource, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return gvr, nil +} + +type fakeStatusWriter struct { + client *fakeClient +} + +func (sw *fakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + // TODO(droot): This results in full update of the obj (spec + status). Need + // a way to update status field only. + return sw.client.Update(ctx, obj, opts...) +} + +func (sw *fakeStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + // TODO(droot): This results in full update of the obj (spec + status). Need + // a way to update status field only. + return sw.client.Patch(ctx, obj, patch, opts...) +} + +func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "apps": + switch gvk.Kind { + case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet": + return true + } + case "autoscaling": + switch gvk.Kind { + case "HorizontalPodAutoscaler": + return true + } + case "batch": + switch gvk.Kind { + case "CronJob", "Job": + return true + } + case "certificates": + switch gvk.Kind { + case "Certificates": + return true + } + case "flowcontrol": + switch gvk.Kind { + case "FlowSchema", "PriorityLevelConfiguration": + return true + } + case "networking": + switch gvk.Kind { + case "Ingress", "IngressClass", "NetworkPolicy": + return true + } + case "policy": + switch gvk.Kind { + case "PodSecurityPolicy": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "scheduling": + switch gvk.Kind { + case "PriorityClass": + return true + } + case "settings": + switch gvk.Kind { + case "PodPreset": + return true + } + case "storage": + switch gvk.Kind { + case "StorageClass": + return true + } + case "": + switch gvk.Kind { + case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node", + "PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate", + "ReplicationController", "ResourceQuota", "Secret", "Service", + "ServiceAccount", "EndpointSlice": + return true + } + } + + return false +} + +func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "coordination": + switch gvk.Kind { + case "Lease": + return true + } + case "node": + switch gvk.Kind { + case "RuntimeClass": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "": + switch gvk.Kind { + case "Endpoint", "Event", "LimitRange", "Service": + return true + } + } + + return false +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/pkg/client/fake/client_suite_test.go b/pkg/client/fake/client_suite_test.go new file mode 100644 index 0000000000..ac5540106e --- /dev/null +++ b/pkg/client/fake/client_suite_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Fake client Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}, 60) diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go new file mode 100644 index 0000000000..5ce93a8cdc --- /dev/null +++ b/pkg/client/fake/client_test.go @@ -0,0 +1,1056 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "k8s.io/client-go/kubernetes/fake" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + appsv1 "k8s.io/api/apps/v1" + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Fake client", func() { + var dep *appsv1.Deployment + var dep2 *appsv1.Deployment + var cm *corev1.ConfigMap + var cl client.WithWatch + + BeforeEach(func() { + dep = &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "ns1", + ResourceVersion: trackerAddResourceVersion, + }, + } + dep2 = &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-2", + Namespace: "ns1", + Labels: map[string]string{ + "test-label": "label-value", + }, + ResourceVersion: trackerAddResourceVersion, + }, + } + cm = &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: trackerAddResourceVersion, + }, + Data: map[string]string{ + "test-key": "test-value", + }, + } + }) + + AssertClientBehavior := func() { + It("should be able to Get", func() { + By("Getting a deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + err := cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(dep)) + }) + + It("should be able to Get using unstructured", func() { + By("Getting a deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("apps/v1") + obj.SetKind("Deployment") + err := cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + }) + + It("should be able to List", func() { + By("Listing all deployments in a namespace") + list := &appsv1.DeploymentList{} + err := cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(2)) + Expect(list.Items).To(ConsistOf(*dep, *dep2)) + }) + + It("should be able to List using unstructured list", func() { + By("Listing all deployments in a namespace") + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("apps/v1") + list.SetKind("DeploymentList") + err := cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(2)) + }) + + It("should be able to List using unstructured list when setting a non-list kind", func() { + By("Listing all deployments in a namespace") + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("apps/v1") + list.SetKind("Deployment") + err := cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(2)) + }) + + It("should be able to retrieve registered objects that got manipulated as unstructured", func() { + list := func() { + By("Listing all endpoints in a namespace") + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("v1") + list.SetKind("EndpointsList") + err := cl.List(context.Background(), list, 
client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + } + + unstructuredEndpoint := func() *unstructured.Unstructured { + item := &unstructured.Unstructured{} + item.SetAPIVersion("v1") + item.SetKind("Endpoints") + item.SetName("test-endpoint") + item.SetNamespace("ns1") + return item + } + + By("Adding the object during client initialization") + cl = NewFakeClient(unstructuredEndpoint()) + list() + Expect(cl.Delete(context.Background(), unstructuredEndpoint())).To(BeNil()) + + By("Creating an object") + item := unstructuredEndpoint() + err := cl.Create(context.Background(), item) + Expect(err).To(BeNil()) + list() + + By("Updating the object") + item.SetAnnotations(map[string]string{"foo": "bar"}) + err = cl.Update(context.Background(), item) + Expect(err).To(BeNil()) + list() + + By("Patching the object") + old := item.DeepCopy() + item.SetAnnotations(map[string]string{"bar": "baz"}) + err = cl.Patch(context.Background(), item, client.MergeFrom(old)) + Expect(err).To(BeNil()) + list() + }) + + It("should be able to Create an unregistered type using unstructured", func() { + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v1") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(context.Background(), item) + Expect(err).To(BeNil()) + }) + + It("should be able to Get an unregisted type using unstructured", func() { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v2") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(context.Background(), item) + Expect(err).To(BeNil()) + + By("Getting and the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v2") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(context.Background(), client.ObjectKeyFromObject(item), item) + Expect(err).To(BeNil()) + }) + + It("should be able to List an unregistered type using unstructured", func() { + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("custom/v3") + list.SetKind("ImageList") + err := cl.List(context.Background(), list) + Expect(err).To(BeNil()) + }) + + It("should be able to List an unregistered type using unstructured", func() { + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("custom/v4") + list.SetKind("Image") + err := cl.List(context.Background(), list) + Expect(err).To(BeNil()) + }) + + It("should be able to Update an unregistered type using unstructured", func() { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v5") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(context.Background(), item) + Expect(err).To(BeNil()) + + By("Updating the object") + err = unstructured.SetNestedField(item.Object, int64(2), "spec", "replicas") + Expect(err).To(BeNil()) + err = cl.Update(context.Background(), item) + Expect(err).To(BeNil()) + + By("Getting the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v5") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(context.Background(), client.ObjectKeyFromObject(item), item) + Expect(err).To(BeNil()) + + By("Inspecting the object") + value, found, err := unstructured.NestedInt64(item.Object, "spec", "replicas") + Expect(err).To(BeNil()) + Expect(found).To(BeTrue()) + Expect(value).To(Equal(int64(2))) + }) + + It("should be able to Patch an unregistered type using unstructured", func() { + By("Creating an object of 
an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v6") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(context.Background(), item) + Expect(err).To(BeNil()) + + By("Updating the object") + original := item.DeepCopy() + err = unstructured.SetNestedField(item.Object, int64(2), "spec", "replicas") + Expect(err).To(BeNil()) + err = cl.Patch(context.Background(), item, client.MergeFrom(original)) + Expect(err).To(BeNil()) + + By("Getting the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v6") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(context.Background(), client.ObjectKeyFromObject(item), item) + Expect(err).To(BeNil()) + + By("Inspecting the object") + value, found, err := unstructured.NestedInt64(item.Object, "spec", "replicas") + Expect(err).To(BeNil()) + Expect(found).To(BeTrue()) + Expect(value).To(Equal(int64(2))) + }) + + It("should be able to Delete an unregistered type using unstructured", func() { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v7") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(context.Background(), item) + Expect(err).To(BeNil()) + + By("Deleting the object") + err = cl.Delete(context.Background(), item) + Expect(err).To(BeNil()) + + By("Getting the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v7") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(context.Background(), client.ObjectKeyFromObject(item), item) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should support filtering by labels and their values", func() { + By("Listing deployments with a particular label and value") + list := &appsv1.DeploymentList{} + err := cl.List(context.Background(), list, client.InNamespace("ns1"), + client.MatchingLabels(map[string]string{ + "test-label": "label-value", + })) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should support filtering by label existence", func() { + By("Listing deployments with a particular label") + list := &appsv1.DeploymentList{} + err := cl.List(context.Background(), list, client.InNamespace("ns1"), + client.HasLabels{"test-label"}) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should be able to Create", func() { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "new-test-cm", + Namespace: "ns2", + }, + } + err := cl.Create(context.Background(), newcm) + Expect(err).To(BeNil()) + + By("Getting the new configmap") + namespacedName := types.NamespacedName{ + Name: "new-test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(newcm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1")) + }) + + It("should error on create with set resourceVersion", func() { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + } + err := cl.Create(context.Background(), newcm) + Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + It("should not change the submitted object if Create failed", func() { + 
By("Trying to create an existing configmap") + submitted := cm.DeepCopy() + submitted.ResourceVersion = "" + submittedReference := submitted.DeepCopy() + err := cl.Create(context.Background(), submitted) + Expect(err).ToNot(BeNil()) + Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) + Expect(submitted).To(Equal(submittedReference)) + }) + + It("should error on Create with empty Name", func() { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns2", + }, + } + err := cl.Create(context.Background(), newcm) + Expect(err.Error()).To(Equal("ConfigMap \"\" is invalid: metadata.name: Required value: name is required")) + }) + + It("should error on Update with empty Name", func() { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns2", + }, + } + err := cl.Update(context.Background(), newcm) + Expect(err.Error()).To(Equal("ConfigMap \"\" is invalid: metadata.name: Required value: name is required")) + }) + + It("should be able to Create with GenerateName", func() { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "new-test-cm", + Namespace: "ns2", + Labels: map[string]string{ + "test-label": "label-value", + }, + }, + } + err := cl.Create(context.Background(), newcm) + Expect(err).To(BeNil()) + + By("Listing configmaps with a particular label") + list := &corev1.ConfigMapList{} + err = cl.List(context.Background(), list, client.InNamespace("ns2"), + client.MatchingLabels(map[string]string{ + "test-label": "label-value", + })) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items[0].Name).NotTo(BeEmpty()) + }) + + It("should be able to Update", func() { + By("Updating a new configmap") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(context.Background(), newcm) + Expect(err).To(BeNil()) + + By("Getting the new configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(newcm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000")) + }) + + It("should allow updates with non-set ResourceVersion for a resource that allows unconditional updates", func() { + By("Updating a new configmap") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(context.Background(), newcm) + Expect(err).To(BeNil()) + + By("Getting the configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(newcm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000")) + }) + + It("should reject updates with non-set 
ResourceVersion for a resource that doesn't allow unconditional updates", func() { + By("Creating a new binding") + binding := &corev1.Binding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Binding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "ns2", + }, + Target: corev1.ObjectReference{ + Kind: "ConfigMap", + APIVersion: "v1", + Namespace: cm.Namespace, + Name: cm.Name, + }, + } + Expect(cl.Create(context.Background(), binding)).To(Succeed()) + + By("Updating the binding with a new resource lacking resource version") + newBinding := &corev1.Binding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Binding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: binding.Name, + Namespace: binding.Namespace, + }, + Target: corev1.ObjectReference{ + Namespace: binding.Namespace, + Name: "blue", + }, + } + Expect(cl.Update(context.Background(), newBinding)).NotTo(Succeed()) + }) + + It("should allow create on update for a resource that allows create on update", func() { + By("Creating a new lease with update") + lease := &coordinationv1.Lease{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "coordination.k8s.io/v1", + Kind: "Lease", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lease", + Namespace: "ns2", + }, + Spec: coordinationv1.LeaseSpec{}, + } + Expect(cl.Create(context.Background(), lease)).To(Succeed()) + + By("Getting the lease") + namespacedName := types.NamespacedName{ + Name: lease.Name, + Namespace: lease.Namespace, + } + obj := &coordinationv1.Lease{} + Expect(cl.Get(context.Background(), namespacedName, obj)).To(Succeed()) + Expect(obj).To(Equal(lease)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1")) + }) + + It("should reject create on update for a resource that does not allow create on update", func() { + By("Attemping to create a new configmap with update") + newcm := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "different-test-cm", + Namespace: "ns2", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + Expect(cl.Update(context.Background(), newcm)).NotTo(Succeed()) + }) + + It("should reject updates with non-matching ResourceVersion", func() { + By("Updating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(context.Background(), newcm) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + + By("Getting the configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) + }) + + It("should reject Delete with a mismatched ResourceVersion", func() { + bogusRV := "bogus" + By("Deleting with a mismatched ResourceVersion Precondition") + err := cl.Delete(context.Background(), dep, client.Preconditions{ResourceVersion: &bogusRV}) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + + list := &appsv1.DeploymentList{} + err = cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(2)) + Expect(list.Items).To(ConsistOf(*dep, *dep2)) + }) + + It("should successfully Delete with a matching ResourceVersion", func() { + goodRV := 
trackerAddResourceVersion + By("Deleting with a matching ResourceVersion Precondition") + err := cl.Delete(context.Background(), dep, client.Preconditions{ResourceVersion: &goodRV}) + Expect(err).To(BeNil()) + + list := &appsv1.DeploymentList{} + err = cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should be able to Delete with no ResourceVersion Precondition", func() { + By("Deleting a deployment") + err := cl.Delete(context.Background(), dep) + Expect(err).To(BeNil()) + + By("Listing all deployments in the namespace") + list := &appsv1.DeploymentList{} + err = cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should be able to Delete with no opts even if object's ResourceVersion doesn't match server", func() { + By("Deleting a deployment") + depCopy := dep.DeepCopy() + depCopy.ResourceVersion = "bogus" + err := cl.Delete(context.Background(), depCopy) + Expect(err).To(BeNil()) + + By("Listing all deployments in the namespace") + list := &appsv1.DeploymentList{} + err = cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should handle finalizers on Update", func() { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "delete-with-finalizers", + } + By("Updating a new object") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(context.Background(), newObj) + Expect(err).To(BeNil()) + + By("Deleting the object") + err = cl.Delete(context.Background(), newObj) + Expect(err).To(BeNil()) + + By("Getting the object") + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj.DeletionTimestamp).NotTo(BeNil()) + + By("Removing the finalizer") + obj.Finalizers = []string{} + err = cl.Update(context.Background(), obj) + Expect(err).To(BeNil()) + + By("Getting the object") + obj = &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should be able to Delete a Collection", func() { + By("Deleting a deploymentList") + err := cl.DeleteAllOf(context.Background(), &appsv1.Deployment{}, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + + By("Listing all deployments in the namespace") + list := &appsv1.DeploymentList{} + err = cl.List(context.Background(), list, client.InNamespace("ns1")) + Expect(err).To(BeNil()) + Expect(list.Items).To(BeEmpty()) + }) + + It("should handle finalizers deleting a collection", func() { + for i := 0; i < 5; i++ { + namespacedName := types.NamespacedName{ + Name: fmt.Sprintf("test-cm-%d", i), + Namespace: "delete-collection-with-finalizers", + } + By("Creating a new object") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(context.Background(), newObj) + 
Expect(err).To(BeNil()) + } + + By("Deleting the object") + err := cl.DeleteAllOf(context.Background(), &corev1.ConfigMap{}, client.InNamespace("delete-collection-with-finalizers")) + Expect(err).To(BeNil()) + + configmaps := corev1.ConfigMapList{} + err = cl.List(context.Background(), &configmaps, client.InNamespace("delete-collection-with-finalizers")) + Expect(err).To(BeNil()) + + Expect(len(configmaps.Items)).To(Equal(5)) + for _, cm := range configmaps.Items { + Expect(cm.DeletionTimestamp).NotTo(BeNil()) + } + }) + + It("should be able to watch", func() { + By("Creating a watch") + objWatch, err := cl.Watch(context.Background(), &corev1.ServiceList{}) + Expect(err).NotTo(HaveOccurred()) + + defer objWatch.Stop() + + go func() { + defer GinkgoRecover() + // It is likely starting a new goroutine is slower than progressing + // in the outer routine, sleep to make sure this is always true + time.Sleep(100 * time.Millisecond) + + err := cl.Create(context.Background(), &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "for-watch"}}) + Expect(err).ToNot(HaveOccurred()) + }() + + event, ok := <-objWatch.ResultChan() + Expect(ok).To(BeTrue()) + Expect(event.Type).To(Equal(watch.Added)) + + service, ok := event.Object.(*corev1.Service) + Expect(ok).To(BeTrue()) + Expect(service.Name).To(Equal("for-watch")) + }) + + Context("with the DryRun option", func() { + It("should not create a new object", func() { + By("Creating a new configmap with DryRun") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-test-cm", + Namespace: "ns2", + }, + } + err := cl.Create(context.Background(), newcm, client.DryRunAll) + Expect(err).To(BeNil()) + + By("Getting the new configmap") + namespacedName := types.NamespacedName{ + Name: "new-test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + Expect(obj).NotTo(Equal(newcm)) + }) + + It("should not Update the object", func() { + By("Updating a new configmap with DryRun") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(context.Background(), newcm, client.DryRunAll) + Expect(err).To(BeNil()) + + By("Getting the new configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) + }) + + It("Should not Delete the object", func() { + By("Deleting a configmap with DryRun with Delete()") + err := cl.Delete(context.Background(), cm, client.DryRunAll) + Expect(err).To(BeNil()) + + By("Deleting a configmap with DryRun with DeleteAllOf()") + err = cl.DeleteAllOf(context.Background(), cm, client.DryRunAll) + Expect(err).To(BeNil()) + + By("Getting the configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) + }) + }) + + It("should be able to Patch", func() { + By("Patching a deployment") + mergePatch, err := 
json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + err = cl.Patch(context.Background(), dep, client.RawPatch(types.StrategicMergePatchType, mergePatch)) + Expect(err).NotTo(HaveOccurred()) + + By("Getting the patched deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(obj.Annotations["foo"]).To(Equal("bar")) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000")) + }) + + It("should handle finalizers on Patch", func() { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "delete-with-finalizers", + } + By("Updating a new object") + now := metav1.Now() + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + DeletionTimestamp: &now, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(context.Background(), newObj) + Expect(err).To(BeNil()) + + By("Removing the finalizer") + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{}, + DeletionTimestamp: &now, + }, + } + obj.Finalizers = []string{} + err = cl.Patch(context.Background(), obj, client.MergeFrom(newObj)) + Expect(err).To(BeNil()) + + By("Getting the object") + obj = &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, obj) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should remove finalizers of the object on Patch", func() { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "patch-finalizers-in-obj", + } + By("Creating a new object") + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(context.Background(), obj) + Expect(err).To(BeNil()) + + By("Removing the finalizer") + mergePatch, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "$deleteFromPrimitiveList/finalizers": []string{ + "finalizers.sigs.k8s.io/test", + }, + }, + }) + Expect(err).To(BeNil()) + err = cl.Patch(context.Background(), obj, client.RawPatch(types.StrategicMergePatchType, mergePatch)) + Expect(err).To(BeNil()) + + By("Check the finalizer has been removed in the object") + Expect(len(obj.Finalizers)).To(Equal(0)) + + By("Check the finalizer has been removed in client") + newObj := &corev1.ConfigMap{} + err = cl.Get(context.Background(), namespacedName, newObj) + Expect(err).To(BeNil()) + Expect(len(newObj.Finalizers)).To(Equal(0)) + }) + } + + Context("with default scheme.Scheme", func() { + BeforeEach(func() { + cl = NewClientBuilder(). + WithObjects(dep, dep2, cm). + Build() + }) + AssertClientBehavior() + }) + + Context("with given scheme", func() { + BeforeEach(func() { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + Expect(appsv1.AddToScheme(scheme)).To(Succeed()) + Expect(coordinationv1.AddToScheme(scheme)).To(Succeed()) + cl = NewClientBuilder(). + WithScheme(scheme). + WithObjects(cm). 
+ WithLists(&appsv1.DeploymentList{Items: []appsv1.Deployment{*dep, *dep2}}). + Build() + }) + AssertClientBehavior() + }) + + It("should set the ResourceVersion to 999 when adding an object to the tracker", func() { + cl := NewClientBuilder().WithObjects(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "cm"}}).Build() + + retrieved := &corev1.Secret{} + Expect(cl.Get(context.Background(), types.NamespacedName{Name: "cm"}, retrieved)).To(Succeed()) + + reference := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + ResourceVersion: "999", + }, + } + Expect(retrieved).To(Equal(reference)) + }) + + It("should be able to build with given tracker and get resource", func() { + clientSet := fake.NewSimpleClientset(dep) + cl := NewClientBuilder().WithRuntimeObjects(dep2).WithObjectTracker(clientSet.Tracker()).Build() + + By("Getting a deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + err := cl.Get(context.Background(), namespacedName, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(dep)) + + By("Getting a deployment from clientSet") + csDep2, err := clientSet.AppsV1().Deployments("ns1").Get(context.Background(), "test-deployment-2", metav1.GetOptions{}) + Expect(err).To(BeNil()) + Expect(csDep2).To(Equal(dep2)) + + By("Getting a new deployment") + namespacedName3 := types.NamespacedName{ + Name: "test-deployment-3", + Namespace: "ns1", + } + + dep3 := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-3", + Namespace: "ns1", + Labels: map[string]string{ + "test-label": "label-value", + }, + ResourceVersion: trackerAddResourceVersion, + }, + } + + _, err = clientSet.AppsV1().Deployments("ns1").Create(context.Background(), dep3, metav1.CreateOptions{}) + Expect(err).To(BeNil()) + + obj = &appsv1.Deployment{} + err = cl.Get(context.Background(), namespacedName3, obj) + Expect(err).To(BeNil()) + Expect(obj).To(Equal(dep3)) + }) +}) diff --git a/pkg/client/fake/doc.go b/pkg/client/fake/doc.go new file mode 100644 index 0000000000..d0614666e3 --- /dev/null +++ b/pkg/client/fake/doc.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package fake provides a fake client for testing. + +A fake client is backed by its simple object store indexed by GroupVersionResource. +You can create a fake client with optional objects. + + client := NewFakeClientWithScheme(scheme, initObjs...) // initObjs is a slice of runtime.Object + +You can invoke the methods defined in the Client interface. + +When in doubt, it's almost always better not to use this package and instead use +envtest.Environment with a real client and API server. + +WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️ + - This client does not have a way to inject specific errors to test handled vs. 
unhandled errors. + - There is some support for sub resources which can cause issues with tests if you're trying to update + e.g. metadata and status in the same reconcile. + - No OpenAPI validation is performed when creating or updating objects. + - ObjectMeta's `Generation` and `ResourceVersion` don't behave properly, Patch or Update + operations that rely on these fields will fail, or give false positives. +*/ +package fake diff --git a/pkg/client/interfaces.go b/pkg/client/interfaces.go new file mode 100644 index 0000000000..7f8f8f31c6 --- /dev/null +++ b/pkg/client/interfaces.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. + List(ctx context.Context, list ObjectList, opts ...ListOption) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj Object, opts ...CreateOption) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj Object, opts ...DeleteOption) error + + // Update updates the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. 
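A minimal usage sketch (illustrative, not part of this patch) of how the Reader and Writer halves of the interface compose in practice; the Deployment type and the label key are assumptions for the example:

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// addLabel shows the Get-then-Update flow: Get fills dep in place, and Update
// writes the mutated object back, refreshing it with the server's response.
func addLabel(ctx context.Context, c client.Client, key client.ObjectKey) error {
	dep := &appsv1.Deployment{}
	if err := c.Get(ctx, key, dep); err != nil {
		// IgnoreNotFound is useful when a missing object is not an error
		// for the caller; any other error is returned unmodified.
		return client.IgnoreNotFound(err)
	}
	if dep.Labels == nil {
		dep.Labels = map[string]string{}
	}
	dep.Labels["example.com/touched"] = "true" // hypothetical label key
	return c.Update(ctx, dep)
}
```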
+ Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error +} + +// StatusClient knows how to create a client which can update status subresource +// for kubernetes objects. +type StatusClient interface { + Status() StatusWriter +} + +// StatusWriter knows how to update status subresource of a Kubernetes object. +type StatusWriter interface { + // Update updates the fields corresponding to the status subresource for the + // given obj. obj must be a struct pointer so that obj can be updated + // with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error +} + +// Client knows how to perform CRUD operations on Kubernetes objects. +type Client interface { + Reader + Writer + StatusClient + + // Scheme returns the scheme this client is using. + Scheme() *runtime.Scheme + // RESTMapper returns the rest this client is using. + RESTMapper() meta.RESTMapper +} + +// WithWatch supports Watch on top of the CRUD operations supported by +// the normal Client. Its intended use-case are CLI apps that need to wait for +// events. +type WithWatch interface { + Client + Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) +} + +// IndexerFunc knows how to take an object and turn it into a series +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. +type IndexerFunc func(Object) []string + +// FieldIndexer knows how to index over a particular "field" such that it +// can later be used by a field selector. +type FieldIndexer interface { + // IndexFields adds an index with the given field name on the given object type + // by using the given function to extract the value for that field. If you want + // compatibility with the Kubernetes API server, only return one key, and only use + // fields that the API server supports. Otherwise, you can return multiple keys, + // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. + IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error +} + +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. +func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} + +// IgnoreAlreadyExists returns nil on AlreadyExists errors. +// All other values that are not AlreadyExists errors or nil are returned unmodified. +func IgnoreAlreadyExists(err error) error { + if apierrors.IsAlreadyExists(err) { + return nil + } + + return err +} diff --git a/pkg/client/metadata_client.go b/pkg/client/metadata_client.go new file mode 100644 index 0000000000..2854556f32 --- /dev/null +++ b/pkg/client/metadata_client.go @@ -0,0 +1,196 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. +type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client. +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client. +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client. 
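For context, a small sketch of how a caller might request metadata-only reads, assuming (as elsewhere in this package) that the top-level client routes *metav1.PartialObjectMetadata objects to this metadata client; the Deployment GVK is only an example:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deploymentLabels fetches only the metadata of a Deployment. The GVK must be
// set on the PartialObjectMetadata so the REST mapping can be resolved, and it
// is restored on the result after the call (see Get above).
func deploymentLabels(ctx context.Context, c client.Client, key client.ObjectKey) (map[string]string, error) {
	objMeta := &metav1.PartialObjectMetadata{}
	objMeta.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "apps",
		Version: "v1",
		Kind:    "Deployment",
	})
	if err := c.Get(ctx, key, objMeta); err != nil {
		return nil, err
	}
	return objMeta.GetLabels(), nil
}
```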
+func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client. +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client. +func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/pkg/client/namespaced_client.go b/pkg/client/namespaced_client.go new file mode 100644 index 0000000000..674fe253d8 --- /dev/null +++ b/pkg/client/namespaced_client.go @@ -0,0 +1,213 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewNamespacedClient wraps an existing client enforcing the namespace value. +// All functions using this client will have the same namespace declared here. +func NewNamespacedClient(c Client, ns string) Client { + return &namespacedClient{ + client: c, + namespace: ns, + } +} + +var _ Client = &namespacedClient{} + +// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value. +type namespacedClient struct { + namespace string + client Client +} + +// Scheme returns the scheme this client is using. +func (n *namespacedClient) Scheme() *runtime.Scheme { + return n.client.Scheme() +} + +// RESTMapper returns the scheme this client is using. +func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// Create implements client.Client. +func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) +} + +// Update implements client.Client. +func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client. +func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client. 
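A brief usage sketch of the namespaced client added above; the "team-a" namespace and the ConfigMap contents are made up for illustration:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createInTeamNamespace wraps an existing client so that namespace-scoped
// objects created without an explicit namespace default to "team-a"; an object
// carrying a different namespace would be rejected with the error shown above.
func createInTeamNamespace(ctx context.Context, c client.Client) error {
	nsClient := client.NewNamespacedClient(c, "team-a")
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config"},
		Data:       map[string]string{"key": "value"},
	}
	// The namespace is set to "team-a" before the underlying Create call.
	return nsClient.Create(ctx, cm)
}
```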
+func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client. +func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client. +func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj, opts...) +} + +// List implements client.Client. +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (n *namespacedClient) Status() StatusWriter { + return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientStatusWriter implements client.StatusWriter. +var _ StatusWriter = &namespacedClientStatusWriter{} + +type namespacedClientStatusWriter struct { + StatusClient StatusWriter + namespace string + namespacedclient Client +} + +// Update implements client.StatusWriter. +func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Update(ctx, obj, opts...) +} + +// Patch implements client.StatusWriter. 
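A short sketch of writing through the status subresource with the wrapper defined above; the field being set on the Deployment is illustrative:

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// markAvailable updates only the status subresource of the Deployment; spec
// changes on dep are ignored by the API server for a status update.
func markAvailable(ctx context.Context, c client.Client, dep *appsv1.Deployment) error {
	dep.Status.AvailableReplicas = dep.Status.Replicas
	return c.Status().Update(ctx, dep)
}
```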
+func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Patch(ctx, obj, patch, opts...) +} diff --git a/pkg/client/namespaced_client_test.go b/pkg/client/namespaced_client_test.go new file mode 100644 index 0000000000..5b8f3388c8 --- /dev/null +++ b/pkg/client/namespaced_client_test.go @@ -0,0 +1,583 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "encoding/json" + "fmt" + "sync/atomic" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + rbacv1 "k8s.io/api/rbac/v1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("NamespacedClient", func() { + var dep *appsv1.Deployment + var ns = "default" + ctx := context.Background() + var count uint64 = 0 + var replicaCount int32 = 2 + + getClient := func() client.Client { + var sch = runtime.NewScheme() + + err := rbacv1.AddToScheme(sch) + Expect(err).To(BeNil()) + err = appsv1.AddToScheme(sch) + Expect(err).To(BeNil()) + + nonNamespacedClient, err := client.New(cfg, client.Options{Scheme: sch}) + Expect(err).NotTo(HaveOccurred()) + Expect(nonNamespacedClient).NotTo(BeNil()) + return client.NewNamespacedClient(nonNamespacedClient, ns) + } + + BeforeEach(func() { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("namespaced-deployment-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("namespaced-deployment-%v", count)}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + }) + + Describe("Get", func() { + + BeforeEach(func() { + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + It("should successfully Get a namespace-scoped object", func() { + name 
:= types.NamespacedName{Name: dep.Name} + result := &appsv1.Deployment{} + + Expect(getClient().Get(ctx, name, result)).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo(dep)) + }) + + It("should error when namespace provided in the object is different than the one "+ + "specified in client", func() { + name := types.NamespacedName{Name: dep.Name, Namespace: "non-default"} + result := &appsv1.Deployment{} + + Expect(getClient().Get(ctx, name, result)).To(HaveOccurred()) + }) + }) + + Describe("List", func() { + BeforeEach(func() { + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully List objects when namespace is not specified with the object", func() { + result := &appsv1.DeploymentList{} + opts := client.MatchingLabels(dep.Labels) + + Expect(getClient().List(ctx, result, opts)).NotTo(HaveOccurred()) + Expect(len(result.Items)).To(BeEquivalentTo(1)) + Expect(result.Items[0]).To(BeEquivalentTo(*dep)) + }) + + It("should List objects from the namespace specified in the client", func() { + result := &appsv1.DeploymentList{} + opts := client.InNamespace("non-default") + + Expect(getClient().List(ctx, result, opts)).NotTo(HaveOccurred()) + Expect(len(result.Items)).To(BeEquivalentTo(1)) + Expect(result.Items[0]).To(BeEquivalentTo(*dep)) + }) + }) + + Describe("Create", func() { + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully create object in the right namespace", func() { + By("creating the object initially") + err := getClient().Create(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("checking if the object was created in the right namespace") + res, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.GetNamespace()).To(BeEquivalentTo(ns)) + }) + + It("should not create object if the namespace of the object is different", func() { + By("creating the object initially") + dep.SetNamespace("non-default") + err := getClient().Create(ctx, dep) + Expect(err).To(HaveOccurred()) + }) + It("should create a cluster scoped object", func() { + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusterRole-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("clusterRole-%v", count)}, + }, + } + cr.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + }) + + By("creating the object initially") + err := getClient().Create(ctx, cr) + Expect(err).NotTo(HaveOccurred()) + + By("checking if the object was created") + res, err := clientset.RbacV1().ClusterRoles().Get(ctx, cr.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + // Delete the clusterRole Resource + deleteClusterRole(ctx, cr) + }) + }) + + Describe("Update", func() { + var err error + BeforeEach(func() { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + dep.Annotations = map[string]string{"foo": "bar"} + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully update the provided object", func() { + By("updating the Deployment") + err = getClient().Update(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating if the updated Deployment has new annotation") + actual, err := 
clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(Equal(ns)) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should successfully update the provided object when namespace is not provided", func() { + By("updating the Deployment") + dep.SetNamespace("") + err = getClient().Update(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating if the updated Deployment has new annotation") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(Equal(ns)) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should not update when object namespace is different", func() { + By("updating the Deployment") + dep.SetNamespace("non-default") + err = getClient().Update(ctx, dep) + Expect(err).To(HaveOccurred()) + }) + + It("should not update any object from other namespace", func() { + By("creating a new namespace") + tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "non-default-1"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + changedDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "changed-dep", + Namespace: tns.Name, + Labels: map[string]string{"name": "changed-dep"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + changedDep.Annotations = map[string]string{"foo": "bar"} + + By("creating the object initially") + _, err = clientset.AppsV1().Deployments(tns.Name).Create(ctx, changedDep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the object") + err = getClient().Update(ctx, changedDep) + Expect(err).To(HaveOccurred()) + + deleteDeployment(ctx, changedDep, tns.Name) + deleteNamespace(ctx, tns) + }) + + It("should update a cluster scoped resource", func() { + changedCR := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusterRole-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("clusterRole-%v", count)}, + }, + } + + changedCR.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + }) + + By("Setting annotations and creating the resource") + changedCR.Annotations = map[string]string{"foo": "bar"} + changedCR, err = clientset.RbacV1().ClusterRoles().Create(ctx, changedCR, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the deployment") + err = getClient().Update(ctx, changedCR) + + By("validating if the cluster role was update") + actual, err := clientset.RbacV1().ClusterRoles().Get(ctx, changedCR.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + + // delete cluster role resource + deleteClusterRole(ctx, changedCR) + }) + + }) + + Describe("Patch", func() { + var err error + BeforeEach(func() { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) 
+ + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully modify the object using Patch", func() { + By("Applying Patch") + err = getClient().Patch(ctx, dep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).NotTo(HaveOccurred()) + + By("validating patched Deployment has new annotations") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + Expect(actual.GetNamespace()).To(Equal(ns)) + }) + + It("should successfully modify the object using Patch when namespace is not provided", func() { + By("Applying Patch") + dep.SetNamespace("") + err = getClient().Patch(ctx, dep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).NotTo(HaveOccurred()) + + By("validating patched Deployment has new annotations") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + Expect(actual.GetNamespace()).To(Equal(ns)) + }) + + It("should not modify the object when namespace of the object is different", func() { + dep.SetNamespace("non-default") + err = getClient().Patch(ctx, dep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).To(HaveOccurred()) + }) + + It("should not modify an object from a different namespace", func() { + By("creating a new namespace") + tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "non-default-2"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + changedDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "changed-dep", + Namespace: tns.Name, + Labels: map[string]string{"name": "changed-dep"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + By("creating the object initially") + changedDep, err = clientset.AppsV1().Deployments(tns.Name).Create(ctx, changedDep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + err = getClient().Patch(ctx, changedDep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).To(HaveOccurred()) + + deleteDeployment(ctx, changedDep, tns.Name) + deleteNamespace(ctx, tns) + }) + + It("should successfully modify cluster scoped resource", func() { + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusterRole-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("clusterRole-%v", count)}, + }, + } + + cr.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + }) + + By("creating the resource") + cr, err = clientset.RbacV1().ClusterRoles().Create(ctx, cr, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + By("Applying Patch") + err = getClient().Patch(ctx, cr, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).NotTo(HaveOccurred()) + + By("Validating the patch") + actual, err := clientset.RbacV1().ClusterRoles().Get(ctx, cr.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + + // 
delete the resource + deleteClusterRole(ctx, cr) + }) + }) + + Describe("Delete and DeleteAllOf", func() { + var err error + BeforeEach(func() { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + It("should successfully delete an object when namespace is not specified", func() { + By("deleting the object") + dep.SetNamespace("") + err = getClient().Delete(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should successfully delete all of the deployments in the given namespace", func() { + By("Deleting all objects in the namespace") + err = getClient().DeleteAllOf(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should not delete deployments in other namespaces", func() { + tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "non-default-3"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + changedDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "changed-dep", + Namespace: tns.Name, + Labels: map[string]string{"name": "changed-dep"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + By("creating the object initially in other namespace") + changedDep, err = clientset.AppsV1().Deployments(tns.Name).Create(ctx, changedDep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + err = getClient().DeleteAllOf(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment exists") + actual, err := clientset.AppsV1().Deployments(tns.Name).Get(ctx, changedDep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).To(BeEquivalentTo(changedDep)) + + deleteDeployment(ctx, changedDep, tns.Name) + deleteNamespace(ctx, tns) + }) + }) + + Describe("StatusWriter", func() { + var err error + BeforeEach(func() { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }) + + It("should change objects via update status", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Update(ctx, changedDep)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(BeEquivalentTo(ns)) + Expect(actual.Status.Replicas).To(BeEquivalentTo(99)) + }) + + It("should not change objects via update status when object namespace is different", func() { + changedDep := dep.DeepCopy() + changedDep.SetNamespace("test") + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Update(ctx, changedDep)).To(HaveOccurred()) + }) + + 
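As a companion to the Delete and DeleteAllOf tests above, a hypothetical helper showing DeleteAllOf combined with the namespace and label options; all names are illustrative:

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cleanupDeployments deletes every Deployment matching the given labels in one
// DeleteCollection call. With the namespaced client above the InNamespace
// option is appended automatically; with a plain client it is passed
// explicitly, as here.
func cleanupDeployments(ctx context.Context, c client.Client, ns string, selector map[string]string) error {
	return c.DeleteAllOf(ctx, &appsv1.Deployment{},
		client.InNamespace(ns),
		client.MatchingLabels(selector),
	)
}
```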
It("should change objects via status patch", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Patch(ctx, changedDep, client.MergeFrom(dep))).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(BeEquivalentTo(ns)) + Expect(actual.Status.Replicas).To(BeEquivalentTo(99)) + }) + + It("should not change objects via status patch when object namespace is different", func() { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + changedDep.SetNamespace("test") + + Expect(getClient().Status().Patch(ctx, changedDep, client.MergeFrom(dep))).To(HaveOccurred()) + }) + }) + + Describe("Test on invalid objects", func() { + It("should refuse to perform operations on invalid object", func() { + err := getClient().Create(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().List(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().Patch(ctx, nil, client.MergeFrom(dep)) + Expect(err).To(HaveOccurred()) + + err = getClient().Update(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().Delete(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().Status().Patch(ctx, nil, client.MergeFrom(dep)) + Expect(err).To(HaveOccurred()) + + err = getClient().Status().Update(ctx, nil) + Expect(err).To(HaveOccurred()) + + }) + + }) +}) + +func generatePatch() []byte { + mergePatch, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + return mergePatch +} + +func deleteClusterRole(ctx context.Context, cr *rbacv1.ClusterRole) { + _, err := clientset.RbacV1().ClusterRoles().Get(ctx, cr.Name, metav1.GetOptions{}) + if err == nil { + err = clientset.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + } +} diff --git a/pkg/client/object.go b/pkg/client/object.go new file mode 100644 index 0000000000..31e334d6c2 --- /dev/null +++ b/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. 
+// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). +// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. +type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. +// +// Code-wise, this means that any object which embedds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. +type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/pkg/client/options.go b/pkg/client/options.go new file mode 100644 index 0000000000..495b86944c --- /dev/null +++ b/pkg/client/options.go @@ -0,0 +1,742 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. 
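A tiny sketch of why the combined Object interface is convenient for generic helpers; the function name is an assumption:

```go
package example

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// describe works for any API type satisfying client.Object: the metav1.Object
// half provides the metadata accessors, and the runtime.Object half provides
// the GroupVersionKind (when TypeMeta is populated on the object).
func describe(obj client.Object) string {
	gvk := obj.GetObjectKind().GroupVersionKind()
	return fmt.Sprintf("%s %s/%s", gvk.Kind, obj.GetNamespace(), obj.GetName())
}
```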
+type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// GetOption is some configuration that modifies options for a get request. +type GetOption interface { + // ApplyToGet applies this configuration to the given get options. + ApplyToGet(*GetOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. + ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +// ApplyToCreate applies this configuration to the given create options. +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToUpdate applies this configuration to the given update options. +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given patch options. +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given delete options. +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. 
+ FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// ApplyToCreate implements CreateOption. +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } +} + +var _ CreateOption = &CreateOptions{} + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. +type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption. 
+func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determined whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. +// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ Get Options + +// GetOptions contains options for get operation. +// Now it only has a Raw field, with support for specific resourceVersion. +type GetOptions struct { + // Raw represents raw GetOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface. + Raw *metav1.GetOptions +} + +var _ GetOption = &GetOptions{} + +// ApplyToGet implements GetOption for GetOptions. +func (o *GetOptions) ApplyToGet(lo *GetOptions) { + if o.Raw != nil { + lo.Raw = o.Raw + } +} + +// AsGetOptions returns these options as a flattened metav1.GetOptions. +// This may mutate the Raw field. +func (o *GetOptions) AsGetOptions() *metav1.GetOptions { + if o == nil || o.Raw == nil { + return &metav1.GetOptions{} + } + return o.Raw +} + +// ApplyOptions applies the given get options on these options, +// and then returns itself (for convenient chaining). 
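A short sketch using the delete option helpers defined above; the zero grace period and foreground propagation are illustrative choices:

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteForeground removes a Deployment with an immediate grace period and
// waits for its dependents to be garbage-collected first (foreground
// cascading). Both helpers also implement DeleteAllOfOption, as shown by the
// ApplyToDeleteAllOf methods above.
func deleteForeground(ctx context.Context, c client.Client, dep *appsv1.Deployment) error {
	return c.Delete(ctx, dep,
		client.GracePeriodSeconds(0),
		client.PropagationPolicy(metav1.DeletePropagationForeground),
	)
}
```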
+func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions { + for _, opt := range opts { + opt.ApplyToGet(o) + } + return o +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use labels.Parse() to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. + Raw *metav1.ListOptions +} + +var _ ListOption = &ListOptions{} + +// ApplyToList implements ListOption for ListOptions. +func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. +func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? 
+ sel := labels.SelectorFromValidatedSet(map[string]string(m)) + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string + +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + sel := labels.NewSelector() + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + sel = sel.Add(*r) + } + } + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + opts.LabelSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFields filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? + sel := fields.Set(m).AsSelector() + opts.FieldSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + opts.FieldSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +// ApplyToList applies this configuration to the given list options. +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// Limit specifies the maximum number of results to return from the server. +// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. 
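The selector helpers compose the same way; here is a sketch combining them on one List call. The label keys, namespace, and field selector are illustrative, and as the comments above note, field selectors served from a cache-backed client only work for fields that have been added to the indexers:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func listCandidatePods(ctx context.Context, c client.Client) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	err := c.List(ctx, pods,
		client.InNamespace("team-a"),                     // restrict to one namespace
		client.MatchingLabels{"app": "web"},              // label equality
		client.HasLabels{"owner"},                        // label must exist, value ignored
		client.MatchingFields{"status.phase": "Running"}, // needs an index when served from the cache
	)
	return pods, err
}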
+type Limit int64 + +// ApplyToList applies this configuration to the given an list options. +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +// ApplyToList applies this configuration to the given an List options. +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). +func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) + } + return o +} + +var _ UpdateOption = &UpdateOptions{} + +// ApplyToUpdate implements UpdateOption. +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.Raw != nil { + uo.Raw = o.Raw + } +} + +// }}} + +// {{{ Patch Options + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. 
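Limit and Continue together give the usual chunked-list loop. A hedged sketch; note that a cache-backed client returns everything in one shot, so pagination like this is normally driven against a direct client or the manager's APIReader:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func forEachSecret(ctx context.Context, c client.Reader, visit func(corev1.Secret)) error {
	token := ""
	for {
		list := &corev1.SecretList{}
		if err := c.List(ctx, list, client.Limit(500), client.Continue(token)); err != nil {
			return err
		}
		for i := range list.Items {
			visit(list.Items[i])
		}
		token = list.Continue // empty when the server has no more chunks
		if token == "" {
			return nil
		}
	}
}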
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions. +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/pkg/client/options_test.go b/pkg/client/options_test.go new file mode 100644 index 0000000000..cb1363ba54 --- /dev/null +++ b/pkg/client/options_test.go @@ -0,0 +1,240 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + . "github.com/onsi/ginkgo" + . 
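Because DeleteAllOfOptions above embeds both ListOptions and DeleteOptions, list filters and delete behaviour can be mixed on a single DeleteAllOf call. An illustrative sketch; the namespace and labels are made up:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func cleanupLoadTestPods(ctx context.Context, c client.Client) error {
	return c.DeleteAllOf(ctx, &corev1.Pod{},
		client.InNamespace("scratch"),              // ListOptions side
		client.MatchingLabels{"job": "load-test"},  // ListOptions side
		client.GracePeriodSeconds(0),               // DeleteOptions side
	)
}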
"github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + utilpointer "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("ListOptions", func() { + It("Should set LabelSelector", func() { + labelSelector, err := labels.Parse("a=b") + Expect(err).NotTo(HaveOccurred()) + o := &client.ListOptions{LabelSelector: labelSelector} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set FieldSelector", func() { + o := &client.ListOptions{FieldSelector: fields.Nothing()} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Namespace", func() { + o := &client.ListOptions{Namespace: "my-ns"} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.ListOptions{Raw: &metav1.ListOptions{FieldSelector: "Hans"}} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Limit", func() { + o := &client.ListOptions{Limit: int64(1)} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Continue", func() { + o := &client.ListOptions{Continue: "foo"} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.ListOptions{} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) +}) + +var _ = Describe("GetOptions", func() { + It("Should set Raw", func() { + o := &client.GetOptions{Raw: &metav1.GetOptions{ResourceVersion: "RV0"}} + newGetOpts := &client.GetOptions{} + o.ApplyToGet(newGetOpts) + Expect(newGetOpts).To(Equal(o)) + }) +}) + +var _ = Describe("CreateOptions", func() { + It("Should set DryRun", func() { + o := &client.CreateOptions{DryRun: []string{"Hello", "Theodore"}} + newCreatOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreatOpts) + Expect(newCreatOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := &client.CreateOptions{FieldManager: "FieldManager"} + newCreatOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreatOpts) + Expect(newCreatOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.CreateOptions{Raw: &metav1.CreateOptions{DryRun: []string{"Bye", "Theodore"}}} + newCreatOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreatOpts) + Expect(newCreatOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.CreateOptions{} + newCreatOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreatOpts) + Expect(newCreatOpts).To(Equal(o)) + }) +}) + +var _ = Describe("DeleteOptions", func() { + It("Should set GracePeriodSeconds", func() { + o := &client.DeleteOptions{GracePeriodSeconds: utilpointer.Int64Ptr(42)} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set Preconditions", func() { + o := &client.DeleteOptions{Preconditions: &metav1.Preconditions{}} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set PropagationPolicy", func() { + policy := metav1.DeletePropagationBackground + o := 
&client.DeleteOptions{PropagationPolicy: &policy} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.DeleteOptions{Raw: &metav1.DeleteOptions{}} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set DryRun", func() { + o := &client.DeleteOptions{DryRun: []string{"Hello", "Pippa"}} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.DeleteOptions{} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) +}) + +var _ = Describe("UpdateOptions", func() { + It("Should set DryRun", func() { + o := &client.UpdateOptions{DryRun: []string{"Bye", "Pippa"}} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := &client.UpdateOptions{FieldManager: "Hello Boris"} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.UpdateOptions{Raw: &metav1.UpdateOptions{}} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.UpdateOptions{} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) +}) + +var _ = Describe("PatchOptions", func() { + It("Should set DryRun", func() { + o := &client.PatchOptions{DryRun: []string{"Bye", "Boris"}} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should set Force", func() { + o := &client.PatchOptions{Force: utilpointer.BoolPtr(true)} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := &client.PatchOptions{FieldManager: "Hello Julian"} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.PatchOptions{Raw: &metav1.PatchOptions{}} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.PatchOptions{} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) +}) + +var _ = Describe("DeleteAllOfOptions", func() { + It("Should set ListOptions", func() { + o := &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Raw: &metav1.ListOptions{}}} + newDeleteAllOfOpts := &client.DeleteAllOfOptions{} + o.ApplyToDeleteAllOf(newDeleteAllOfOpts) + Expect(newDeleteAllOfOpts).To(Equal(o)) + }) + It("Should set DeleleteOptions", func() { + o := &client.DeleteAllOfOptions{DeleteOptions: client.DeleteOptions{GracePeriodSeconds: utilpointer.Int64Ptr(44)}} + newDeleteAllOfOpts := &client.DeleteAllOfOptions{} + o.ApplyToDeleteAllOf(newDeleteAllOfOpts) + Expect(newDeleteAllOfOpts).To(Equal(o)) + }) +}) + +var _ = Describe("MatchingLabels", func() { + It("Should produce an invalid selector when given invalid input", func() { + matchingLabels := 
client.MatchingLabels(map[string]string{"k": "axahm2EJ8Phiephe2eixohbee9eGeiyees1thuozi1xoh0GiuH3diewi8iem7Nui"}) + listOpts := &client.ListOptions{} + matchingLabels.ApplyToList(listOpts) + + r, _ := listOpts.LabelSelector.Requirements() + _, err := labels.NewRequirement(r[0].Key(), r[0].Operator(), r[0].Values().List()) + Expect(err).ToNot(BeNil()) + expectedErrMsg := `values[0][k]: Invalid value: "axahm2EJ8Phiephe2eixohbee9eGeiyees1thuozi1xoh0GiuH3diewi8iem7Nui": must be no more than 63 characters` + Expect(err.Error()).To(Equal(expectedErrMsg)) + }) +}) diff --git a/pkg/client/patch.go b/pkg/client/patch.go new file mode 100644 index 0000000000..11d6083885 --- /dev/null +++ b/pkg/client/patch.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch/v5" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply Patch = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge Patch = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. 
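Merge and RawPatch cover the cases where the caller already has the patch body and no client-side diff is wanted. A small sketch using RawPatch with a hand-written merge patch; the label values are illustrative:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func labelNode(ctx context.Context, c client.Client, node *corev1.Node) error {
	// The bytes are sent as-is with the given patch type; nothing is computed
	// from the current state of the object.
	patch := client.RawPatch(types.MergePatchType, []byte(`{"metadata":{"labels":{"tier":"edge"}}}`))
	return c.Patch(ctx, node, patch)
}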
+type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. + OptimisticLock bool +} + +type mergeFromPatch struct { + patchType types.PatchType + createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) + from Object + opts MergeFromOptions +} + +// Type implements Patch. +func (s *mergeFromPatch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *mergeFromPatch) Data(obj Object) ([]byte, error) { + original := s.from + modified := obj + + if s.opts.OptimisticLock { + version := original.GetResourceVersion() + if len(version) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original) + } + + original = original.DeepCopyObject().(Object) + original.SetResourceVersion("") + + modified = modified.DeepCopyObject().(Object) + modified.SetResourceVersion(version) + } + + originalJSON, err := json.Marshal(original) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(modified) + if err != nil { + return nil, err + } + + data, err := s.createPatch(originalJSON, modifiedJSON, obj) + if err != nil { + return nil, err + } + + return data, nil +} + +func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) { + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) { + return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct) +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +func MergeFrom(obj Object) Patch { + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +// See MergeFrom for more details. +func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options} +} + +// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. 
the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +// Please note, that CRDs don't support strategic-merge-patch, see +// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility +func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. +func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/pkg/client/patch_test.go b/pkg/client/patch_test.go new file mode 100644 index 0000000000..2910ef56bf --- /dev/null +++ b/pkg/client/patch_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
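A sketch of the two patterns these Patch implementations are mostly used for: a client-side diff with MergeFrom, and server-side apply with Apply plus the ForceOwnership option defined earlier. The field-manager name is illustrative, FieldOwner is assumed to be the helper from this package's options, and, as the comment above says, strategic merge patch is not available for CRDs:

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Read-modify-write: MergeFrom snapshots the object before mutation and
// Data() later yields only the changed fields.
func scale(ctx context.Context, c client.Client, dep *appsv1.Deployment, replicas int32) error {
	before := dep.DeepCopy()
	dep.Spec.Replicas = &replicas
	return c.Patch(ctx, dep, client.MergeFrom(before))
}

// Server-side apply: the whole desired state is sent and merged by the API
// server; ForceOwnership re-acquires fields owned by other managers.
func applyDesired(ctx context.Context, c client.Client, desired *appsv1.Deployment) error {
	// applyPatch marshals the object as-is, so apiVersion/kind must be set.
	desired.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment"))
	return c.Patch(ctx, desired, client.Apply,
		client.FieldOwner("example-controller"),
		client.ForceOwnership,
	)
}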
+*/ + +package client + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func BenchmarkMergeFrom(b *testing.B) { + cm1 := &corev1.ConfigMap{} + cm1.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + cm1.ResourceVersion = "anything" + + cm2 := cm1.DeepCopy() + cm2.Data = map[string]string{"key": "value"} + + sts1 := &appsv1.StatefulSet{} + sts1.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) + sts1.ResourceVersion = "somesuch" + + sts2 := sts1.DeepCopy() + sts2.Spec.Template.Spec.Containers = []corev1.Container{{ + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1m"), + corev1.ResourceMemory: resource.MustParse("1M"), + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + SecurityContext: &corev1.SecurityContext{}, + }} + + b.Run("NoOptions", func(b *testing.B) { + cmPatch := MergeFrom(cm1) + if _, err := cmPatch.Data(cm2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + stsPatch := MergeFrom(sts1) + if _, err := stsPatch.Data(sts2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cmPatch.Data(cm2) + _, _ = stsPatch.Data(sts2) + } + }) + + b.Run("WithOptimisticLock", func(b *testing.B) { + cmPatch := MergeFromWithOptions(cm1, MergeFromWithOptimisticLock{}) + if _, err := cmPatch.Data(cm2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + stsPatch := MergeFromWithOptions(sts1, MergeFromWithOptimisticLock{}) + if _, err := stsPatch.Data(sts2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cmPatch.Data(cm2) + _, _ = stsPatch.Data(sts2) + } + }) +} diff --git a/pkg/client/split.go b/pkg/client/split.go new file mode 100644 index 0000000000..8717345349 --- /dev/null +++ b/pkg/client/split.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. +type NewDelegatingClientInput struct { + CacheReader Reader + Client Client + UncachedObjects []Object + CacheUnstructured bool +} + +// NewDelegatingClient creates a new delegating client. +// +// A delegating client forms a Client by composing separate reader, writer and +// statusclient interfaces. 
This way, you can have an Client that reads from a +// cache and writes to the API server. +func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { + uncachedGVKs := map[schema.GroupVersionKind]struct{}{} + for _, obj := range in.UncachedObjects { + gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) + if err != nil { + return nil, err + } + uncachedGVKs[gvk] = struct{}{} + } + + return &delegatingClient{ + scheme: in.Client.Scheme(), + mapper: in.Client.RESTMapper(), + Reader: &delegatingReader{ + CacheReader: in.CacheReader, + ClientReader: in.Client, + scheme: in.Client.Scheme(), + uncachedGVKs: uncachedGVKs, + cacheUnstructured: in.CacheUnstructured, + }, + Writer: in.Client, + StatusClient: in.Client, + }, nil +} + +type delegatingClient struct { + Reader + Writer + StatusClient + + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// Scheme returns the scheme this client is using. +func (d *delegatingClient) Scheme() *runtime.Scheme { + return d.scheme +} + +// RESTMapper returns the rest mapper this client is using. +func (d *delegatingClient) RESTMapper() meta.RESTMapper { + return d.mapper +} + +// delegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). +type delegatingReader struct { + CacheReader Reader + ClientReader Reader + + uncachedGVKs map[schema.GroupVersionKind]struct{} + scheme *runtime.Scheme + cacheUnstructured bool +} + +func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, d.scheme) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := d.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !d.cacheUnstructured { + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUnstructured || isUnstructuredList, nil + } + return false, nil +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := d.shouldBypassCache(obj); err != nil { + return err + } else if isUncached { + return d.ClientReader.Get(ctx, key, obj, opts...) + } + return d.CacheReader.Get(ctx, key, obj, opts...) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { + if isUncached, err := d.shouldBypassCache(list); err != nil { + return err + } else if isUncached { + return d.ClientReader.List(ctx, list, opts...) + } + return d.CacheReader.List(ctx, list, opts...) 
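Putting the pieces together, a hedged sketch of wiring a delegating client by hand (the manager normally does this for you): reads for most kinds come from an informer cache, while ConfigMaps and, with CacheUnstructured left false, unstructured objects go straight to the API server. The cache argument stands in for an already constructed cache.Cache:

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func buildClient(cfg *rest.Config, c cache.Cache) (client.Client, error) {
	direct, err := client.New(cfg, client.Options{})
	if err != nil {
		return nil, err
	}
	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader:     c,      // serves Get/List for cached kinds
		Client:          direct, // serves writes and uncached reads
		UncachedObjects: []client.Object{&corev1.ConfigMap{}},
		// CacheUnstructured is left false, so unstructured reads also bypass the cache.
	})
}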
+} diff --git a/pkg/client/testdata/examplecrd.yaml b/pkg/client/testdata/examplecrd.yaml new file mode 100644 index 0000000000..5409ee9789 --- /dev/null +++ b/pkg/client/testdata/examplecrd.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: chaospods.chaosapps.metamagical.io +spec: + group: chaosapps.metamagical.io + names: + kind: ChaosPod + plural: chaospods + scope: Namespaced + versions: + - name: "v1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/client/typed_client.go b/pkg/client/typed_client.go new file mode 100644 index 0000000000..c4e56d9be6 --- /dev/null +++ b/pkg/client/typed_client.go @@ -0,0 +1,208 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} +var _ StatusWriter = &typedClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client. +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client. +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). 
+ NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). + Name(key.Name).Do(ctx).Into(obj) +} + +// List implements client.Client. +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// UpdateStatus used by StatusWriter to write status. +func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. +func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} diff --git a/pkg/client/unstructured_client.go b/pkg/client/unstructured_client.go new file mode 100644 index 0000000000..3d3dbe7b28 --- /dev/null +++ b/pkg/client/unstructured_client.go @@ -0,0 +1,275 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
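The status subresource methods above are what back Client.Status(); a minimal usage sketch from a controller, with the field being set chosen purely for illustration:

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func markObserved(ctx context.Context, c client.Client, dep *appsv1.Deployment) error {
	before := dep.DeepCopy()
	dep.Status.ObservedGeneration = dep.Generation

	// c.Status().Update(ctx, dep) would send the whole status object;
	// a merge patch against the status subresource sends only the change.
	return c.Status().Patch(ctx, dep, client.MergeFrom(before))
}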
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} +var _ StatusWriter = &unstructuredClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client. +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client. +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). 
+ VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + result := r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(getOpts.AsGetOptions(), uc.paramCodec). + Name(key.Name). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + + return result +} + +// List implements client.Client. +func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). 
+ VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(u) + + u.SetGroupVersionKind(gvk) + return result +} diff --git a/pkg/client/watch.go b/pkg/client/watch.go new file mode 100644 index 0000000000..70490664bd --- /dev/null +++ b/pkg/client/watch.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +// NewWithWatch returns a new WithWatch. +func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { + client, err := newClient(config, options) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + return &watchingClient{client: client, dynamic: dynamicClient}, nil +} + +type watchingClient struct { + *client + dynamic dynamic.Interface +} + +func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { + switch l := list.(type) { + case *unstructured.UnstructuredList: + return w.unstructuredWatch(ctx, l, opts...) + case *metav1.PartialObjectMetadataList: + return w.metadataWatch(ctx, l, opts...) + default: + return w.typedWatch(ctx, l, opts...) + } +} + +func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Raw == nil { + listOpts.Raw = &metav1.ListOptions{} + } + listOpts.Raw.Watch = true + + return listOpts +} + +func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := w.listOpts(opts...) + + resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return nil, err + } + + return resInt.Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + r, err := w.client.unstructuredClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) 
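A sketch of driving the watch client directly; cfg is a *rest.Config and the namespace is illustrative. Typed, unstructured, and metadata-only lists all go through the type switch in Watch above:

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func watchPods(ctx context.Context, cfg *rest.Config) error {
	cl, err := client.NewWithWatch(cfg, client.Options{})
	if err != nil {
		return err
	}
	w, err := cl.Watch(ctx, &corev1.PodList{}, client.InNamespace("default"))
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s %T\n", ev.Type, ev.Object)
	}
	return nil
}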
+ + if listOpts.Namespace != "" && r.isNamespaced() { + return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) + } + return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.typedClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec). + Watch(ctx) +} diff --git a/pkg/client/watch_test.go b/pkg/client/watch_test.go new file mode 100644 index 0000000000..6181596b5e --- /dev/null +++ b/pkg/client/watch_test.go @@ -0,0 +1,124 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "fmt" + "sync/atomic" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("ClientWithWatch", func() { + var dep *appsv1.Deployment + var count uint64 = 0 + var replicaCount int32 = 2 + var ns = "kube-public" + ctx := context.TODO() + + BeforeEach(func() { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("watch-deployment-name-%v", count), Namespace: ns, Labels: map[string]string{"app": fmt.Sprintf("bar-%v", count)}}, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }, serverSideTimeoutSeconds) + + AfterEach(func() { + deleteDeployment(ctx, dep, ns) + }, serverSideTimeoutSeconds) + + Describe("NewWithWatch", func() { + It("should return a new Client", func() { + cl, err := client.NewWithWatch(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + }) + + watchSuite := func(through client.ObjectList, expectedType client.Object) { + cl, err := client.NewWithWatch(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + watchInterface, err := cl.Watch(ctx, through, &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", dep.Name), + Namespace: 
dep.Namespace, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(watchInterface).NotTo(BeNil()) + + defer watchInterface.Stop() + + event, ok := <-watchInterface.ResultChan() + Expect(ok).To(BeTrue()) + Expect(event.Type).To(BeIdenticalTo(watch.Added)) + Expect(event.Object).To(BeAssignableToTypeOf(expectedType)) + + // The metadata client doesn't set GVK so we just use the + // name and UID as a proxy to confirm that we got the right + // object. + metaObject, ok := event.Object.(metav1.Object) + Expect(ok).To(BeTrue()) + Expect(metaObject.GetName()).To(Equal(dep.Name)) + Expect(metaObject.GetUID()).To(Equal(dep.UID)) + + } + + It("should receive a create event when watching the typed object", func() { + watchSuite(&appsv1.DeploymentList{}, &appsv1.Deployment{}) + }, 15) + + It("should receive a create event when watching the unstructured object", func() { + u := &unstructured.UnstructuredList{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + watchSuite(u, &unstructured.Unstructured{}) + }, 15) + + It("should receive a create event when watching the metadata object", func() { + m := &metav1.PartialObjectMetadataList{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}} + watchSuite(m, &metav1.PartialObjectMetadata{}) + }, 15) + }) + +}) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go new file mode 100644 index 0000000000..4b8ee8e7c5 --- /dev/null +++ b/pkg/cluster/cluster.go @@ -0,0 +1,270 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "errors" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +// Cluster provides various methods to interact with a cluster. +type Cluster interface { + // SetFields will set any dependencies on an object for which the object has implemented the inject + // interface - e.g. inject.Client. + // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. + SetFields(interface{}) error + + // GetConfig returns an initialized Config + GetConfig() *rest.Config + + // GetScheme returns an initialized Scheme + GetScheme() *runtime.Scheme + + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. 
+	GetClient() client.Client
+
+	// GetFieldIndexer returns a client.FieldIndexer configured with the client
+	GetFieldIndexer() client.FieldIndexer
+
+	// GetCache returns a cache.Cache
+	GetCache() cache.Cache
+
+	// GetEventRecorderFor returns a new EventRecorder for the provided name
+	GetEventRecorderFor(name string) record.EventRecorder
+
+	// GetRESTMapper returns a RESTMapper
+	GetRESTMapper() meta.RESTMapper
+
+	// GetAPIReader returns a reader that will be configured to use the API server.
+	// This should be used sparingly and only when the client does not fit your
+	// use case.
+	GetAPIReader() client.Reader
+
+	// Start starts the cluster
+	Start(ctx context.Context) error
+}
+
+// Options are the possible options that can be configured for a Cluster.
+type Options struct {
+	// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources.
+	// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always a better
+	// idea to pass your own scheme in. See the documentation in pkg/scheme for more information.
+	Scheme *runtime.Scheme
+
+	// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs.
+	MapperProvider func(c *rest.Config) (meta.RESTMapper, error)
+
+	// Logger is the logger that should be used by this Cluster.
+	// If none is set, it defaults to the log.Log global logger.
+	Logger logr.Logger
+
+	// SyncPeriod determines the minimum frequency at which watched resources are
+	// reconciled. A lower period will correct entropy more quickly, but reduce
+	// responsiveness to change if there are many watched resources. Change this
+	// value only if you know what you are doing. Defaults to 10 hours if unset.
+	// There will be a 10 percent jitter between the SyncPeriod of all controllers
+	// so that all controllers will not send list requests simultaneously.
+	SyncPeriod *time.Duration
+
+	// Namespace, if specified, restricts the manager's cache to watch objects in
+	// the desired namespace. Defaults to all namespaces.
+	//
+	// Note: If a namespace is specified, controllers can still Watch for a
+	// cluster-scoped resource (e.g. Node). For namespaced resources the cache
+	// will only hold objects from the desired namespace.
+	Namespace string
+
+	// NewCache is the function that will create the cache to be used
+	// by the manager. If not set this will use the default new cache function.
+	NewCache cache.NewCacheFunc
+
+	// NewClient is the func that creates the client to be used by the manager.
+	// If not set this will create the default DelegatingClient that will
+	// use the cache for reads and the client for writes.
+	NewClient NewClientFunc
+
+	// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
+	// for the given objects.
+	ClientDisableCacheFor []client.Object
+
+	// DryRunClient specifies whether the client should be configured to enforce
+	// dryRun mode.
+	DryRunClient bool
+
+	// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API.
+	// Use this to customize the event correlator and spam filter.
+	//
+	// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
+	// is shorter than the lifetime of your process.
+	EventBroadcaster record.EventBroadcaster
+
+	// makeBroadcaster allows deferring the creation of the broadcaster to
+	// avoid leaking goroutines if we never call Start on this manager. It also
+	// returns whether or not this is an "owned" broadcaster, and as such should be
+	// stopped with the manager.
+	makeBroadcaster intrec.EventBroadcasterProducer
+
+	// Dependency injection for testing
+	newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error)
+}
+
+// Option can be used to manipulate Options.
+type Option func(*Options)
+
+// New constructs a brand new cluster.
+func New(config *rest.Config, opts ...Option) (Cluster, error) {
+	if config == nil {
+		return nil, errors.New("must specify Config")
+	}
+
+	options := Options{}
+	for _, opt := range opts {
+		opt(&options)
+	}
+	options = setOptionsDefaults(options)
+
+	// Create the mapper provider
+	mapper, err := options.MapperProvider(config)
+	if err != nil {
+		options.Logger.Error(err, "Failed to get API Group-Resources")
+		return nil, err
+	}
+
+	// Create the cache for the cached read client and for registering informers
+	cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace})
+	if err != nil {
+		return nil, err
+	}
+
+	clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper}
+
+	apiReader, err := client.New(config, clientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...)
+	if err != nil {
+		return nil, err
+	}
+
+	if options.DryRunClient {
+		writeObj = client.NewDryRunClient(writeObj)
+	}
+
+	// Create the recorder provider to inject event recorders for the components.
+	// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
+	// to the particular controller that it's being injected into, rather than the generic one used here.
+	recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster)
+	if err != nil {
+		return nil, err
+	}
+
+	return &cluster{
+		config:           config,
+		scheme:           options.Scheme,
+		cache:            cache,
+		fieldIndexes:     cache,
+		client:           writeObj,
+		apiReader:        apiReader,
+		recorderProvider: recorderProvider,
+		mapper:           mapper,
+		logger:           options.Logger,
+	}, nil
+}
+
+// setOptionsDefaults sets default values for Options fields.
+func setOptionsDefaults(options Options) Options {
+	// Use the Kubernetes client-go scheme if none is specified
+	if options.Scheme == nil {
+		options.Scheme = scheme.Scheme
+	}
+
+	if options.MapperProvider == nil {
+		options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) {
+			return apiutil.NewDynamicRESTMapper(c)
+		}
+	}
+
+	// Allow users to define how to create a new client
+	if options.NewClient == nil {
+		options.NewClient = DefaultNewClient
+	}
+
+	// Allow newCache to be mocked
+	if options.NewCache == nil {
+		options.NewCache = cache.New
+	}
+
+	// Allow newRecorderProvider to be mocked
+	if options.newRecorderProvider == nil {
+		options.newRecorderProvider = intrec.NewProvider
+	}
+
+	// This is duplicated with pkg/manager; we need it here to provide
+	// the user with an EventBroadcaster, and therefore for the Leader election.
+	if options.EventBroadcaster == nil {
+		// defer initialization to avoid leaking by default
+		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
+			return record.NewBroadcaster(), true
+		}
+	} else {
+		options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
+			return options.EventBroadcaster, false
+		}
+	}
+
+	if options.Logger.GetSink() == nil {
+		options.Logger = logf.RuntimeLog.WithName("cluster")
+	}
+
+	return options
+}
+
+// NewClientFunc allows a user to define how to create a client.
+type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error)
+
+// DefaultNewClient creates the default caching client.
+func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
+	c, err := client.New(config, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return client.NewDelegatingClient(client.NewDelegatingClientInput{
+		CacheReader:     cache,
+		Client:          c,
+		UncachedObjects: uncachedObjects,
+	})
+}
diff --git a/pkg/cluster/cluster_suite_test.go b/pkg/cluster/cluster_suite_test.go
new file mode 100644
index 0000000000..4970497193
--- /dev/null
+++ b/pkg/cluster/cluster_suite_test.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+	"net/http"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func TestSource(t *testing.T) {
+	RegisterFailHandler(Fail)
+	suiteName := "Cluster Suite"
+	RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)})
+}
+
+var testenv *envtest.Environment
+var cfg *rest.Config
+var clientset *kubernetes.Clientset
+
+// clientTransport is used to force-close keep-alives in tests that check for leaks.
+var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? + // hopefully this doesn't break 🤞 + clientTransport = rt.(*http.Transport) + return rt + } + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go new file mode 100644 index 0000000000..f9f7a0bdf3 --- /dev/null +++ b/pkg/cluster/cluster_test.go @@ -0,0 +1,297 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "go.uber.org/goleak" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ = Describe("cluster.Cluster", func() { + Describe("New", func() { + It("should return an error if there is no Config", func() { + c, err := New(nil) + Expect(c).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("must specify Config")) + + }) + + It("should return an error if it can't create a RestMapper", func() { + expected := fmt.Errorf("expected error: RestMapper") + c, err := New(cfg, func(o *Options) { + o.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { return nil, expected } + }) + Expect(c).To(BeNil()) + Expect(err).To(Equal(expected)) + + }) + + It("should return an error it can't create a client.Client", func() { + c, err := New(cfg, func(o *Options) { + o.NewClient = func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { + return nil, errors.New("expected error") + } + }) + Expect(c).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should return an error it can't create a cache.Cache", func() { + c, err := New(cfg, func(o *Options) { + o.NewCache = func(config *rest.Config, opts cache.Options) (cache.Cache, error) { + return nil, fmt.Errorf("expected error") + } + }) + Expect(c).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should create 
a client defined by the new client function", func() {
+			c, err := New(cfg, func(o *Options) {
+				o.NewClient = func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
+					return nil, nil
+				}
+			})
+			Expect(c).ToNot(BeNil())
+			Expect(err).ToNot(HaveOccurred())
+			Expect(c.GetClient()).To(BeNil())
+		})
+
+		It("should return an error if it can't create a recorder.Provider", func() {
+			c, err := New(cfg, func(o *Options) {
+				o.newRecorderProvider = func(_ *rest.Config, _ *runtime.Scheme, _ logr.Logger, _ intrec.EventBroadcasterProducer) (*intrec.Provider, error) {
+					return nil, fmt.Errorf("expected error")
+				}
+			})
+			Expect(c).To(BeNil())
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(ContainSubstring("expected error"))
+		})
+
+	})
+
+	Describe("Start", func() {
+		It("should stop when context is cancelled", func() {
+			c, err := New(cfg)
+			Expect(err).NotTo(HaveOccurred())
+			ctx, cancel := context.WithCancel(context.Background())
+			cancel()
+			Expect(c.Start(ctx)).NotTo(HaveOccurred())
+		})
+	})
+
+	Describe("SetFields", func() {
+		It("should inject field values", func() {
+			c, err := New(cfg, func(o *Options) {
+				o.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) {
+					return &informertest.FakeInformers{}, nil
+				}
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Injecting the dependencies")
+			err = c.SetFields(&injectable{
+				scheme: func(scheme *runtime.Scheme) error {
+					defer GinkgoRecover()
+					Expect(scheme).To(Equal(c.GetScheme()))
+					return nil
+				},
+				config: func(config *rest.Config) error {
+					defer GinkgoRecover()
+					Expect(config).To(Equal(c.GetConfig()))
+					return nil
+				},
+				client: func(client client.Client) error {
+					defer GinkgoRecover()
+					Expect(client).To(Equal(c.GetClient()))
+					return nil
+				},
+				cache: func(cache cache.Cache) error {
+					defer GinkgoRecover()
+					Expect(cache).To(Equal(c.GetCache()))
+					return nil
+				},
+				log: func(logger logr.Logger) error {
+					defer GinkgoRecover()
+					Expect(logger).To(Equal(logf.RuntimeLog.WithName("cluster")))
+					return nil
+				},
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Returning an error if dependency injection fails")
+
+			expected := fmt.Errorf("expected error")
+			err = c.SetFields(&injectable{
+				client: func(client client.Client) error {
+					return expected
+				},
+			})
+			Expect(err).To(Equal(expected))
+
+			err = c.SetFields(&injectable{
+				scheme: func(scheme *runtime.Scheme) error {
+					return expected
+				},
+			})
+			Expect(err).To(Equal(expected))
+
+			err = c.SetFields(&injectable{
+				config: func(config *rest.Config) error {
+					return expected
+				},
+			})
+			Expect(err).To(Equal(expected))
+
+			err = c.SetFields(&injectable{
+				cache: func(c cache.Cache) error {
+					return expected
+				},
+			})
+			Expect(err).To(Equal(expected))
+		})
+	})
+
+	It("should not leak goroutines when stopped", func() {
+		currentGRs := goleak.IgnoreCurrent()
+
+		c, err := New(cfg)
+		Expect(err).NotTo(HaveOccurred())
+
+		ctx, cancel := context.WithCancel(context.Background())
+		cancel()
+		Expect(c.Start(ctx)).NotTo(HaveOccurred())
+
+		// force-close keep-alive connections. These'll time out anyway (after
+		// 30s or so), but force-closing them speeds up the tests.
+ clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should provide a function to get the Config", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetConfig()).To(Equal(cluster.config)) + }) + + It("should provide a function to get the Client", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetClient()).To(Equal(cluster.client)) + }) + + It("should provide a function to get the Scheme", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetScheme()).To(Equal(cluster.scheme)) + }) + + It("should provide a function to get the FieldIndexer", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetFieldIndexer()).To(Equal(cluster.cache)) + }) + + It("should provide a function to get the EventRecorder", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(c.GetEventRecorderFor("test")).NotTo(BeNil()) + }) + It("should provide a function to get the APIReader", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(c.GetAPIReader()).NotTo(BeNil()) + }) +}) + +var _ inject.Cache = &injectable{} +var _ inject.Client = &injectable{} +var _ inject.Scheme = &injectable{} +var _ inject.Config = &injectable{} +var _ inject.Logger = &injectable{} + +type injectable struct { + scheme func(scheme *runtime.Scheme) error + client func(client.Client) error + config func(config *rest.Config) error + cache func(cache.Cache) error + log func(logger logr.Logger) error +} + +func (i *injectable) InjectCache(c cache.Cache) error { + if i.cache == nil { + return nil + } + return i.cache(c) +} + +func (i *injectable) InjectConfig(config *rest.Config) error { + if i.config == nil { + return nil + } + return i.config(config) +} + +func (i *injectable) InjectClient(c client.Client) error { + if i.client == nil { + return nil + } + return i.client(c) +} + +func (i *injectable) InjectScheme(scheme *runtime.Scheme) error { + if i.scheme == nil { + return nil + } + return i.scheme(scheme) +} + +func (i *injectable) InjectLogger(log logr.Logger) error { + if i.log == nil { + return nil + } + return i.log(log) +} + +func (i *injectable) Start(<-chan struct{}) error { + return nil +} diff --git a/pkg/cluster/internal.go b/pkg/cluster/internal.go new file mode 100644 index 0000000000..125e1d144e --- /dev/null +++ b/pkg/cluster/internal.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type cluster struct { + // config is the rest.config used to talk to the apiserver. Required. + config *rest.Config + + // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults + // to scheme.scheme. + scheme *runtime.Scheme + + cache cache.Cache + + // TODO(directxman12): Provide an escape hatch to get individual indexers + // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). + client client.Client + + // apiReader is the reader that will make requests to the api server and not the cache. + apiReader client.Reader + + // fieldIndexes knows how to add field indexes over the Cache used by this controller, + // which can later be consumed via field selectors from the injected client. + fieldIndexes client.FieldIndexer + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). + recorderProvider *intrec.Provider + + // mapper is used to map resources to kind, and map kind and version. + mapper meta.RESTMapper + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. + logger logr.Logger +} + +func (c *cluster) SetFields(i interface{}) error { + if _, err := inject.ConfigInto(c.config, i); err != nil { + return err + } + if _, err := inject.ClientInto(c.client, i); err != nil { + return err + } + if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { + return err + } + if _, err := inject.SchemeInto(c.scheme, i); err != nil { + return err + } + if _, err := inject.CacheInto(c.cache, i); err != nil { + return err + } + if _, err := inject.MapperInto(c.mapper, i); err != nil { + return err + } + return nil +} + +func (c *cluster) GetConfig() *rest.Config { + return c.config +} + +func (c *cluster) GetClient() client.Client { + return c.client +} + +func (c *cluster) GetScheme() *runtime.Scheme { + return c.scheme +} + +func (c *cluster) GetFieldIndexer() client.FieldIndexer { + return c.fieldIndexes +} + +func (c *cluster) GetCache() cache.Cache { + return c.cache +} + +func (c *cluster) GetEventRecorderFor(name string) record.EventRecorder { + return c.recorderProvider.GetEventRecorderFor(name) +} + +func (c *cluster) GetRESTMapper() meta.RESTMapper { + return c.mapper +} + +func (c *cluster) GetAPIReader() client.Reader { + return c.apiReader +} + +func (c *cluster) GetLogger() logr.Logger { + return c.logger +} + +func (c *cluster) Start(ctx context.Context) error { + defer c.recorderProvider.Stop(ctx) + return c.cache.Start(ctx) +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 0000000000..8e853d6a0f --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,112 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+	"os"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
+)
+
+// ControllerManagerConfiguration defines the functions necessary to parse a config file
+// and to configure the Options struct for the ctrl.Manager.
+type ControllerManagerConfiguration interface {
+	runtime.Object
+
+	// Complete returns the versioned configuration
+	Complete() (v1alpha1.ControllerManagerConfigurationSpec, error)
+}
+
+// DeferredFileLoader is used to configure the decoder for loading controller
+// runtime component config types.
+type DeferredFileLoader struct {
+	ControllerManagerConfiguration
+	path   string
+	scheme *runtime.Scheme
+	once   sync.Once
+	err    error
+}
+
+// File sets up the deferred file loader for the configuration.
+// It also configures the defaults for the loader if nothing else is set.
+//
+// Defaults:
+// * Path: "./config.yaml"
+// * Kind: GenericControllerManagerConfiguration
+func File() *DeferredFileLoader {
+	scheme := runtime.NewScheme()
+	utilruntime.Must(v1alpha1.AddToScheme(scheme))
+	return &DeferredFileLoader{
+		path:                           "./config.yaml",
+		ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{},
+		scheme:                         scheme,
+	}
+}
+
+// Complete will use sync.Once to set the scheme.
+func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) {
+	d.once.Do(d.loadFile)
+	if d.err != nil {
+		return v1alpha1.ControllerManagerConfigurationSpec{}, d.err
+	}
+	return d.ControllerManagerConfiguration.Complete()
+}
+
+// AtPath will set the path to load the file for the decoder.
+func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader {
+	d.path = path
+	return d
+}
+
+// OfKind will set the type to be used for decoding the file into.
+func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader {
+	d.ControllerManagerConfiguration = obj
+	return d
+}
+
+// InjectScheme will configure the scheme to be used for decoding the file.
+func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error {
+	d.scheme = scheme
+	return nil
+}
+
+// loadFile is called via sync.Once to load the file.
+func (d *DeferredFileLoader) loadFile() { + if d.scheme == nil { + d.err = fmt.Errorf("scheme not supplied to controller configuration loader") + return + } + + content, err := os.ReadFile(d.path) + if err != nil { + d.err = fmt.Errorf("could not read file at %s", d.path) + return + } + + codecs := serializer.NewCodecFactory(d.scheme) + + // Regardless of if the bytes are of any external version, + // it will be read successfully and converted into the internal version + if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil { + d.err = fmt.Errorf("could not decode file into runtime.Object") + } +} diff --git a/pkg/config/config_suite_test.go b/pkg/config/config_suite_test.go new file mode 100644 index 0000000000..9a494dafbc --- /dev/null +++ b/pkg/config/config_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestScheme(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Config Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100644 index 0000000000..f2b5461b55 --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,48 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" +) + +var _ = Describe("config", func() { + Describe("File", func() { + + It("should error loading from non existent file", func() { + loader := config.File() + _, err := loader.Complete() + Expect(err).ToNot(BeNil()) + }) + + It("should load a config from file", func() { + conf := v1alpha1.ControllerManagerConfiguration{} + loader := config.File().AtPath("./testdata/config.yaml").OfKind(&conf) + Expect(conf.CacheNamespace).To(Equal("")) + + _, err := loader.Complete() + Expect(err).To(BeNil()) + + Expect(*conf.LeaderElection.LeaderElect).To(Equal(true)) + Expect(conf.CacheNamespace).To(Equal("default")) + Expect(conf.Metrics.BindAddress).To(Equal(":8081")) + }) + }) +}) diff --git a/pkg/config/doc.go b/pkg/config/doc.go new file mode 100644 index 0000000000..a169ec5597 --- /dev/null +++ b/pkg/config/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains functionality for interacting with ComponentConfig +// files +// +// # DeferredFileLoader +// +// This uses a deferred file decoding allowing you to chain your configuration +// setup. You can pass this into manager.Options#File and it will load your +// config. +package config diff --git a/pkg/config/example_test.go b/pkg/config/example_test.go new file mode 100644 index 0000000000..fb1cd58b5f --- /dev/null +++ b/pkg/config/example_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config_test + +import ( + "fmt" + "os" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/config" + + "sigs.k8s.io/controller-runtime/examples/configfile/custom/v1alpha1" +) + +var scheme = runtime.NewScheme() + +func init() { + _ = v1alpha1.AddToScheme(scheme) +} + +// This example will load a file using Complete with only +// defaults set. +func ExampleFile() { + // This will load a config file from ./config.yaml + loader := config.File() + if _, err := loader.Complete(); err != nil { + fmt.Println("failed to load config") + os.Exit(1) + } +} + +// This example will load the file from a custom path. +func ExampleDeferredFileLoader_atPath() { + loader := config.File().AtPath("/var/run/controller-runtime/config.yaml") + if _, err := loader.Complete(); err != nil { + fmt.Println("failed to load config") + os.Exit(1) + } +} + +// This example sets up loader with a custom scheme. 
+func ExampleDeferredFileLoader_injectScheme() { + loader := config.File() + err := loader.InjectScheme(scheme) + if err != nil { + fmt.Println("failed to inject scheme") + os.Exit(1) + } + + _, err = loader.Complete() + if err != nil { + fmt.Println("failed to load config") + os.Exit(1) + } +} + +// This example sets up the loader with a custom scheme and custom type. +func ExampleDeferredFileLoader_ofKind() { + loader := config.File().OfKind(&v1alpha1.CustomControllerManagerConfiguration{}) + err := loader.InjectScheme(scheme) + if err != nil { + fmt.Println("failed to inject scheme") + os.Exit(1) + } + _, err = loader.Complete() + if err != nil { + fmt.Println("failed to load config") + os.Exit(1) + } +} diff --git a/pkg/config/testdata/config.yaml b/pkg/config/testdata/config.yaml new file mode 100644 index 0000000000..d88da3a65b --- /dev/null +++ b/pkg/config/testdata/config.yaml @@ -0,0 +1,7 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: ControllerManagerConfiguration +cacheNamespace: default +metrics: + bindAddress: :8081 +leaderElection: + leaderElect: true diff --git a/pkg/config/v1alpha1/doc.go b/pkg/config/v1alpha1/doc.go new file mode 100644 index 0000000000..1e3adbafb8 --- /dev/null +++ b/pkg/config/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 provides the ControllerManagerConfiguration used for +// configuring ctrl.Manager +// +kubebuilder:object:generate=true +package v1alpha1 diff --git a/pkg/config/v1alpha1/register.go b/pkg/config/v1alpha1/register.go new file mode 100644 index 0000000000..9efdbc0668 --- /dev/null +++ b/pkg/config/v1alpha1/register.go @@ -0,0 +1,37 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
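+	//
+	// A minimal sketch (myScheme is assumed to be a *runtime.Scheme owned by
+	// the caller, mirroring how pkg/config registers these types):
+	//
+	//	utilruntime.Must(v1alpha1.AddToScheme(myScheme))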
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+func init() {
+	SchemeBuilder.Register(&ControllerManagerConfiguration{})
+}
diff --git a/pkg/config/v1alpha1/types.go b/pkg/config/v1alpha1/types.go
new file mode 100644
index 0000000000..e67b62e514
--- /dev/null
+++ b/pkg/config/v1alpha1/types.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
+)
+
+// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration.
+type ControllerManagerConfigurationSpec struct {
+	// SyncPeriod determines the minimum frequency at which watched resources are
+	// reconciled. A lower period will correct entropy more quickly, but reduce
+	// responsiveness to change if there are many watched resources. Change this
+	// value only if you know what you are doing. Defaults to 10 hours if unset.
+	// There will be a 10 percent jitter between the SyncPeriod of all controllers
+	// so that all controllers will not send list requests simultaneously.
+	// +optional
+	SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"`
+
+	// LeaderElection is the LeaderElection config to be used when configuring
+	// the manager.Manager leader election.
+	// +optional
+	LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
+
+	// CacheNamespace, if specified, restricts the manager's cache to watch objects in
+	// the desired namespace. Defaults to all namespaces.
+	//
+	// Note: If a namespace is specified, controllers can still Watch for a
+	// cluster-scoped resource (e.g. Node). For namespaced resources the cache
+	// will only hold objects from the desired namespace.
+	// +optional
+	CacheNamespace string `json:"cacheNamespace,omitempty"`
+
+	// GracefulShutdownTimeout is the duration given to runnables to stop before the manager actually returns on stop.
+	// To disable graceful shutdown, set to time.Duration(0).
+	// To use graceful shutdown without timeout, set to a negative duration, e.g. time.Duration(-1).
+	// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
+	GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"`
+
+	// Controller contains global configuration options for controllers
+	// registered within this manager.
+	// +optional
+	Controller *ControllerConfigurationSpec `json:"controller,omitempty"`
+
+	// Metrics contains the controller metrics configuration
+	// +optional
+	Metrics ControllerMetrics `json:"metrics,omitempty"`
+
+	// Health contains the controller health configuration
+	// +optional
+	Health ControllerHealth `json:"health,omitempty"`
+
+	// Webhook contains the controller's webhook configuration
+	// +optional
+	Webhook ControllerWebhook `json:"webhook,omitempty"`
+}
+
+// ControllerConfigurationSpec defines the global configuration for
+// controllers registered with the manager.
+type ControllerConfigurationSpec struct {
+	// GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliations
+	// allowed for that controller.
+	//
+	// When a controller is registered within this manager using the builder utilities,
+	// users have to specify the type the controller reconciles in the For(...) call.
+	// If the kind of the object passed matches one of the keys in this map, the concurrency
+	// for that controller is set to the number specified.
+	//
+	// The key is expected to be consistent in form with GroupKind.String(),
+	// e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`.
+	//
+	// +optional
+	GroupKindConcurrency map[string]int `json:"groupKindConcurrency,omitempty"`
+
+	// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
+	// Defaults to 2 minutes if not set.
+	// +optional
+	CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"`
+}
+
+// ControllerMetrics defines the metrics configs.
+type ControllerMetrics struct {
+	// BindAddress is the TCP address that the controller should bind to
+	// for serving prometheus metrics.
+	// It can be set to "0" to disable the metrics serving.
+	// +optional
+	BindAddress string `json:"bindAddress,omitempty"`
+}
+
+// ControllerHealth defines the health configs.
+type ControllerHealth struct {
+	// HealthProbeBindAddress is the TCP address that the controller should bind to
+	// for serving health probes.
+	// +optional
+	HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"`
+
+	// ReadinessEndpointName, defaults to "readyz".
+	// +optional
+	ReadinessEndpointName string `json:"readinessEndpointName,omitempty"`
+
+	// LivenessEndpointName, defaults to "healthz".
+	// +optional
+	LivenessEndpointName string `json:"livenessEndpointName,omitempty"`
+}
+
+// ControllerWebhook defines the webhook server for the controller.
+type ControllerWebhook struct {
+	// Port is the port that the webhook server serves at.
+	// It is used to set webhook.Server.Port.
+	// +optional
+	Port *int `json:"port,omitempty"`
+
+	// Host is the hostname that the webhook server binds to.
+	// It is used to set webhook.Server.Host.
+	// +optional
+	Host string `json:"host,omitempty"`
+
+	// CertDir is the directory that contains the server key and certificate.
+	// If not set, the webhook server will look up the server key and certificate in
+	// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
+	// must be named tls.key and tls.crt, respectively.
+	// +optional
+	CertDir string `json:"certDir,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API.
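+//
+// A minimal sketch of loading this type from disk with the loader in
+// pkg/config (the path and variable names are illustrative; error handling elided):
+//
+//	cfg := v1alpha1.ControllerManagerConfiguration{}
+//	spec, err := config.File().AtPath("./config.yaml").OfKind(&cfg).Complete()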
+type ControllerManagerConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// ControllerManagerConfigurationSpec returns the configurations for controllers
+	ControllerManagerConfigurationSpec `json:",inline"`
+}
+
+// Complete returns the configuration for controller-runtime.
+func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) {
+	return *c, nil
+}
diff --git a/pkg/config/v1alpha1/zz_generated.deepcopy.go b/pkg/config/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..5329bef667
--- /dev/null
+++ b/pkg/config/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,153 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
+	timex "time"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfigurationSpec) {
+	*out = *in
+	if in.GroupKindConcurrency != nil {
+		in, out := &in.GroupKindConcurrency, &out.GroupKindConcurrency
+		*out = make(map[string]int, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.CacheSyncTimeout != nil {
+		in, out := &in.CacheSyncTimeout, &out.CacheSyncTimeout
+		*out = new(timex.Duration)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec.
+func (in *ControllerConfigurationSpec) DeepCopy() *ControllerConfigurationSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerConfigurationSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth.
+func (in *ControllerHealth) DeepCopy() *ControllerHealth {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerHealth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration.
+func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ControllerManagerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) { + *out = *in + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(configv1alpha1.LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.GracefulShutdownTimeout != nil { + in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout + *out = new(v1.Duration) + **out = **in + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(ControllerConfigurationSpec) + (*in).DeepCopyInto(*out) + } + out.Metrics = in.Metrics + out.Health = in.Health + in.Webhook.DeepCopyInto(&out.Webhook) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec. +func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerManagerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics. +func (in *ControllerMetrics) DeepCopy() *ControllerMetrics { + if in == nil { + return nil + } + out := new(ControllerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook. +func (in *ControllerWebhook) DeepCopy() *ControllerWebhook { + if in == nil { + return nil + } + out := new(ControllerWebhook) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go new file mode 100644 index 0000000000..8e3d8591d6 --- /dev/null +++ b/pkg/controller/controller.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/internal/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Options are the arguments for creating a new Controller. 
+type Options struct { + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // Reconciler reconciles an object + Reconciler reconcile.Reconciler + + // RateLimiter is used to limit how frequently requests may be queued. + // Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting. + // The overall is a token bucket and the per-item is exponential. + RateLimiter ratelimiter.RateLimiter + + // LogConstructor is used to construct a logger used for this controller and passed + // to each reconciliation via the context field. + LogConstructor func(request *reconcile.Request) logr.Logger + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + RecoverPanic bool +} + +// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests +// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item. +// Work typically is reads and writes Kubernetes objects to make the system state match the state specified +// in the object Spec. +type Controller interface { + // Reconciler is called to reconcile an object by Namespace/Name + reconcile.Reconciler + + // Watch takes events provided by a Source and uses the EventHandler to + // enqueue reconcile.Requests in response to the events. + // + // Watch may be provided one or more Predicates to filter events before + // they are given to the EventHandler. Events will be passed to the + // EventHandler if all provided Predicates evaluate to true. + Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error + + // Start starts the controller. Start blocks until the context is closed or a + // controller has an error starting. + Start(ctx context.Context) error + + // GetLogger returns this controller logger prefilled with basic information. + GetLogger() logr.Logger +} + +// New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have +// been synced before the Controller is Started. +func New(name string, mgr manager.Manager, options Options) (Controller, error) { + c, err := NewUnmanaged(name, mgr, options) + if err != nil { + return nil, err + } + + // Add the controller as a Manager components + return c, mgr.Add(c) +} + +// NewUnmanaged returns a new controller without adding it to the manager. The +// caller is responsible for starting the returned controller. 
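+//
+// A minimal usage sketch (the manager, reconciler and context are assumed to
+// exist elsewhere; error handling elided):
+//
+//	c, _ := controller.NewUnmanaged("pod-controller", mgr, controller.Options{Reconciler: r})
+//	go func() { _ = c.Start(ctx) }()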
+func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller, error) { + if options.Reconciler == nil { + return nil, fmt.Errorf("must specify Reconciler") + } + + if len(name) == 0 { + return nil, fmt.Errorf("must specify Name for Controller") + } + + if options.LogConstructor == nil { + log := mgr.GetLogger().WithValues( + "controller", name, + ) + options.LogConstructor = func(req *reconcile.Request) logr.Logger { + log := log + if req != nil { + log = log.WithValues( + "object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + if options.MaxConcurrentReconciles <= 0 { + options.MaxConcurrentReconciles = 1 + } + + if options.CacheSyncTimeout == 0 { + options.CacheSyncTimeout = 2 * time.Minute + } + + if options.RateLimiter == nil { + options.RateLimiter = workqueue.DefaultControllerRateLimiter() + } + + // Inject dependencies into Reconciler + if err := mgr.SetFields(options.Reconciler); err != nil { + return nil, err + } + + // Create controller with dependencies set + return &controller.Controller{ + Do: options.Reconciler, + MakeQueue: func() workqueue.RateLimitingInterface { + return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name) + }, + MaxConcurrentReconciles: options.MaxConcurrentReconciles, + CacheSyncTimeout: options.CacheSyncTimeout, + SetFields: mgr.SetFields, + Name: name, + LogConstructor: options.LogConstructor, + RecoverPanic: options.RecoverPanic, + }, nil +} diff --git a/pkg/controller/controller_integration_test.go b/pkg/controller/controller_integration_test.go new file mode 100644 index 0000000000..9f347b0032 --- /dev/null +++ b/pkg/controller/controller_integration_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var _ = Describe("controller", func() { + var reconciled chan reconcile.Request + ctx := context.Background() + + BeforeEach(func() { + reconciled = make(chan reconcile.Request) + Expect(cfg).NotTo(BeNil()) + }) + + Describe("controller", func() { + // TODO(directxman12): write a whole suite of controller-client interaction tests + + It("should reconcile", func() { + By("Creating the Manager") + cm, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the Controller") + instance, err := controller.New("foo-controller", cm, controller.Options{ + Reconciler: reconcile.Func( + func(_ context.Context, request reconcile.Request) (reconcile.Result, error) { + reconciled <- request + return reconcile.Result{}, nil + }), + }) + Expect(err).NotTo(HaveOccurred()) + + By("Watching Resources") + err = instance.Watch(&source.Kind{Type: &appsv1.ReplicaSet{}}, &handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.Deployment{}, + }) + Expect(err).NotTo(HaveOccurred()) + + err = instance.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}) + Expect(err).NotTo(HaveOccurred()) + + err = cm.GetClient().Get(ctx, types.NamespacedName{Name: "foo"}, &corev1.Namespace{}) + Expect(err).To(Equal(&cache.ErrCacheNotStarted{})) + err = cm.GetClient().List(ctx, &corev1.NamespaceList{}) + Expect(err).To(Equal(&cache.ErrCacheNotStarted{})) + + By("Starting the Manager") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(cm.Start(ctx)).NotTo(HaveOccurred()) + }() + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-name"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + SecurityContext: &corev1.SecurityContext{ + Privileged: truePtr(), + }, + }, + }, + }, + }, + }, + } + expectedReconcileRequest := reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: "default", + Name: "deployment-name", + }} + + By("Invoking Reconciling for Create") + deployment, err = clientset.AppsV1().Deployments("default").Create(ctx, deployment, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Invoking Reconciling for Update") + newDeployment := deployment.DeepCopy() + newDeployment.Labels = map[string]string{"foo": "bar"} + _, err = clientset.AppsV1().Deployments("default").Update(ctx, newDeployment, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Invoking Reconciling for an OwnedObject when it is created") + replicaset := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rs-name", + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deployment, schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + }), + }, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: deployment.Spec.Template, + }, + } + replicaset, err = clientset.AppsV1().ReplicaSets("default").Create(ctx, replicaset, metav1.CreateOptions{}) + 
Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Invoking Reconciling for an OwnedObject when it is updated") + newReplicaset := replicaset.DeepCopy() + newReplicaset.Labels = map[string]string{"foo": "bar"} + _, err = clientset.AppsV1().ReplicaSets("default").Update(ctx, newReplicaset, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Invoking Reconciling for an OwnedObject when it is deleted") + err = clientset.AppsV1().ReplicaSets("default").Delete(ctx, replicaset.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Invoking Reconciling for Delete") + err = clientset.AppsV1().Deployments("default"). + Delete(ctx, "deployment-name", metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + + By("Listing a type with a slice of pointers as items field") + err = cm.GetClient(). + List(context.Background(), &controllertest.UnconventionalListTypeList{}) + Expect(err).NotTo(HaveOccurred()) + }, 5) + }) +}) + +func truePtr() *bool { + t := true + return &t +} diff --git a/pkg/controller/controller_suite_test.go b/pkg/controller/controller_suite_test.go new file mode 100644 index 0000000000..71b2232239 --- /dev/null +++ b/pkg/controller/controller_suite_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "net/http" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics" + crscheme "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Controller Integration Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +// clientTransport is used to force-close keep-alives in tests that check for leaks. +var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + err := (&crscheme.Builder{ + GroupVersion: schema.GroupVersion{Group: "chaosapps.metamagical.io", Version: "v1"}, + }). 
+ Register( + &controllertest.UnconventionalListType{}, + &controllertest.UnconventionalListTypeList{}, + ).AddToScheme(scheme.Scheme) + Expect(err).To(BeNil()) + + testenv = &envtest.Environment{ + CRDDirectoryPaths: []string{"testdata/crds"}, + } + + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? + // hopefully this doesn't break 🤞 + clientTransport = rt.(*http.Transport) + return rt + } + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + // Prevent the metrics listener being created + metrics.DefaultBindAddress = "0" +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) + + // Put the DefaultBindAddress back + metrics.DefaultBindAddress = ":8080" +}) diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go new file mode 100644 index 0000000000..d3e8419a16 --- /dev/null +++ b/pkg/controller/controller_test.go @@ -0,0 +1,159 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller_test + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "go.uber.org/goleak" + corev1 "k8s.io/api/core/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ = Describe("controller.Controller", func() { + rec := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil + }) + + Describe("New", func() { + It("should return an error if Name is not Specified", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + c, err := controller.New("", m, controller.Options{Reconciler: rec}) + Expect(c).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("must specify Name for Controller")) + }) + + It("should return an error if Reconciler is not Specified", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("foo", m, controller.Options{}) + Expect(c).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("must specify Reconciler")) + }) + + It("NewController should return an error if injecting Reconciler fails", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("foo", m, controller.Options{Reconciler: &failRec{}}) + Expect(c).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should not return an error if two controllers are registered with different names", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c1, err := controller.New("c1", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + Expect(c1).ToNot(BeNil()) + + c2, err := controller.New("c2", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + Expect(c2).ToNot(BeNil()) + }) + + It("should not leak goroutines when stopped", func() { + currentGRs := goleak.IgnoreCurrent() + + watchChan := make(chan event.GenericEvent, 1) + watch := &source.Channel{Source: watchChan} + watchChan <- event.GenericEvent{Object: &corev1.Pod{}} + + reconcileStarted := make(chan struct{}) + controllerFinished := make(chan struct{}) + rec := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + close(reconcileStarted) + // Make sure reconciliation takes a moment and is not quicker than the controllers + // shutdown. + time.Sleep(50 * time.Millisecond) + // Explicitly test this on top of the leakdetection, as the latter uses Eventually + // so might succeed even when the controller does not wait for all reconciliations + // to finish. 
+				Expect(controllerFinished).NotTo(BeClosed())
+				return reconcile.Result{}, nil
+			})
+
+			m, err := manager.New(cfg, manager.Options{})
+			Expect(err).NotTo(HaveOccurred())
+
+			c, err := controller.New("new-controller", m, controller.Options{Reconciler: rec})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(c.Watch(watch, &handler.EnqueueRequestForObject{})).To(Succeed())
+
+			ctx, cancel := context.WithCancel(context.Background())
+			go func() {
+				defer GinkgoRecover()
+				Expect(m.Start(ctx)).To(Succeed())
+				close(controllerFinished)
+			}()
+
+			<-reconcileStarted
+			cancel()
+			<-controllerFinished
+
+			// Force-close keep-alive connections. These'll time out anyway (after
+			// 30s or so) but force-closing them speeds up the tests.
+			clientTransport.CloseIdleConnections()
+			Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed())
+		})
+
+		It("should not create goroutines if never started", func() {
+			currentGRs := goleak.IgnoreCurrent()
+
+			m, err := manager.New(cfg, manager.Options{})
+			Expect(err).NotTo(HaveOccurred())
+
+			_, err = controller.New("new-controller", m, controller.Options{Reconciler: rec})
+			Expect(err).NotTo(HaveOccurred())
+
+			// Force-close keep-alive connections. These'll time out anyway (after
+			// 30s or so) but force-closing them speeds up the tests.
+			clientTransport.CloseIdleConnections()
+			Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed())
+		})
+	})
+})
+
+var _ reconcile.Reconciler = &failRec{}
+var _ inject.Client = &failRec{}
+
+type failRec struct{}
+
+func (*failRec) Reconcile(context.Context, reconcile.Request) (reconcile.Result, error) {
+	return reconcile.Result{}, nil
+}
+
+func (*failRec) InjectClient(client.Client) error {
+	return fmt.Errorf("expected error")
+}
diff --git a/pkg/controller/controllertest/doc.go b/pkg/controller/controllertest/doc.go
new file mode 100644
index 0000000000..91c5a3e35e
--- /dev/null
+++ b/pkg/controller/controllertest/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package controllertest contains fake informers for testing controllers.
+// When in doubt, it's almost always better to test against a real API server
+// using envtest.Environment.
+package controllertest
diff --git a/pkg/controller/controllertest/testing.go b/pkg/controller/controllertest/testing.go
new file mode 100644
index 0000000000..b9f97d5289
--- /dev/null
+++ b/pkg/controller/controllertest/testing.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllertest + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/util/workqueue" +) + +var _ runtime.Object = &ErrorType{} + +// ErrorType implements runtime.Object but isn't registered in any scheme and should cause errors in tests as a result. +type ErrorType struct{} + +// GetObjectKind implements runtime.Object. +func (ErrorType) GetObjectKind() schema.ObjectKind { return nil } + +// DeepCopyObject implements runtime.Object. +func (ErrorType) DeepCopyObject() runtime.Object { return nil } + +var _ workqueue.RateLimitingInterface = Queue{} + +// Queue implements a RateLimiting queue as a non-ratelimited queue for testing. +// This helps testing by having functions that use a RateLimiting queue synchronously add items to the queue. +type Queue struct { + workqueue.Interface +} + +// AddAfter implements RateLimitingInterface. +func (q Queue) AddAfter(item interface{}, duration time.Duration) { + q.Add(item) +} + +// AddRateLimited implements RateLimitingInterface. TODO(community): Implement this. +func (q Queue) AddRateLimited(item interface{}) { + q.Add(item) +} + +// Forget implements RateLimitingInterface. TODO(community): Implement this. +func (q Queue) Forget(item interface{}) {} + +// NumRequeues implements RateLimitingInterface. TODO(community): Implement this. +func (q Queue) NumRequeues(item interface{}) int { + return 0 +} diff --git a/pkg/controller/controllertest/unconventionallisttypecrd.go b/pkg/controller/controllertest/unconventionallisttypecrd.go new file mode 100644 index 0000000000..d0f5017154 --- /dev/null +++ b/pkg/controller/controllertest/unconventionallisttypecrd.go @@ -0,0 +1,76 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllertest + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.Object = &UnconventionalListType{} +var _ runtime.Object = &UnconventionalListTypeList{} + +// UnconventionalListType is used to test CRDs with List types that +// have a slice of pointers rather than a slice of literals. +type UnconventionalListType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec string `json:"spec,omitempty"` +} + +// DeepCopyObject implements runtime.Object +// Handwritten for simplicity. +func (u *UnconventionalListType) DeepCopyObject() runtime.Object { + return u.DeepCopy() +} + +// DeepCopy implements *UnconventionalListType +// Handwritten for simplicity. +func (u *UnconventionalListType) DeepCopy() *UnconventionalListType { + return &UnconventionalListType{ + TypeMeta: u.TypeMeta, + ObjectMeta: *u.ObjectMeta.DeepCopy(), + Spec: u.Spec, + } +} + +// UnconventionalListTypeList is used to test CRDs with List types that +// have a slice of pointers rather than a slice of literals. 
+type UnconventionalListTypeList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []*UnconventionalListType `json:"items"`
+}
+
+// DeepCopyObject implements runtime.Object
+// Handwritten for simplicity.
+func (u *UnconventionalListTypeList) DeepCopyObject() runtime.Object {
+	return u.DeepCopy()
+}
+
+// DeepCopy implements *UnconventionalListTypeList
+// Handwritten for simplicity.
+func (u *UnconventionalListTypeList) DeepCopy() *UnconventionalListTypeList {
+	out := &UnconventionalListTypeList{
+		TypeMeta: u.TypeMeta,
+		ListMeta: *u.ListMeta.DeepCopy(),
+	}
+	for _, item := range u.Items {
+		out.Items = append(out.Items, item.DeepCopy())
+	}
+	return out
+}
diff --git a/pkg/controller/controllertest/util.go b/pkg/controller/controllertest/util.go
new file mode 100644
index 0000000000..b638b4976c
--- /dev/null
+++ b/pkg/controller/controllertest/util.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllertest
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/cache"
+)
+
+var _ cache.SharedIndexInformer = &FakeInformer{}
+
+// FakeInformer provides fake Informer functionality for testing.
+type FakeInformer struct {
+	// Synced is returned by the HasSynced functions to implement the Informer interface.
+	Synced bool
+
+	// RunCount is incremented each time RunInformersAndControllers is called.
+	RunCount int
+
+	handlers []cache.ResourceEventHandler
+}
+
+// AddIndexers does nothing. TODO(community): Implement this.
+func (f *FakeInformer) AddIndexers(indexers cache.Indexers) error {
+	return nil
+}
+
+// GetIndexer does nothing. TODO(community): Implement this.
+func (f *FakeInformer) GetIndexer() cache.Indexer {
+	return nil
+}
+
+// Informer returns the fake Informer.
+func (f *FakeInformer) Informer() cache.SharedIndexInformer {
+	return f
+}
+
+// HasSynced implements the Informer interface. Returns f.Synced.
+func (f *FakeInformer) HasSynced() bool {
+	return f.Synced
+}
+
+// AddEventHandler implements the Informer interface. Adds an EventHandler to the fake Informers.
+func (f *FakeInformer) AddEventHandler(handler cache.ResourceEventHandler) {
+	f.handlers = append(f.handlers, handler)
+}
+
+// Run implements the Informer interface. Increments f.RunCount.
+func (f *FakeInformer) Run(<-chan struct{}) {
+	f.RunCount++
+}
+
+// Add fakes an Add event for obj.
+func (f *FakeInformer) Add(obj metav1.Object) {
+	for _, h := range f.handlers {
+		h.OnAdd(obj)
+	}
+}
+
+// Update fakes an Update event for obj.
+func (f *FakeInformer) Update(oldObj, newObj metav1.Object) {
+	for _, h := range f.handlers {
+		h.OnUpdate(oldObj, newObj)
+	}
+}
+
+// Delete fakes a Delete event for obj.
+func (f *FakeInformer) Delete(obj metav1.Object) {
+	for _, h := range f.handlers {
+		h.OnDelete(obj)
+	}
+}
+
+// AddEventHandlerWithResyncPeriod does nothing. TODO(community): Implement this.
+func (f *FakeInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) {
+
+}
+
+// GetStore does nothing. TODO(community): Implement this.
+func (f *FakeInformer) GetStore() cache.Store {
+	return nil
+}
+
+// GetController does nothing. TODO(community): Implement this.
+func (f *FakeInformer) GetController() cache.Controller {
+	return nil
+}
+
+// LastSyncResourceVersion does nothing. TODO(community): Implement this.
+func (f *FakeInformer) LastSyncResourceVersion() string {
+	return ""
+}
+
+// SetWatchErrorHandler does nothing. TODO(community): Implement this.
+func (f *FakeInformer) SetWatchErrorHandler(cache.WatchErrorHandler) error {
+	return nil
+}
+
+// SetTransform does nothing. TODO(community): Implement this.
+func (f *FakeInformer) SetTransform(t cache.TransformFunc) error {
+	return nil
+}
diff --git a/pkg/controller/controllerutil/controllerutil.go b/pkg/controller/controllerutil/controllerutil.go
new file mode 100644
index 0000000000..aa53a77d41
--- /dev/null
+++ b/pkg/controller/controllerutil/controllerutil.go
@@ -0,0 +1,394 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllerutil
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// AlreadyOwnedError is an error returned if the object you are trying to assign
+// a controller reference to is already owned by another controller. Object is the
+// subject and Owner is the reference for the current owner.
+type AlreadyOwnedError struct {
+	Object metav1.Object
+	Owner  metav1.OwnerReference
+}
+
+func (e *AlreadyOwnedError) Error() string {
+	return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name)
+}
+
+func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *AlreadyOwnedError {
+	return &AlreadyOwnedError{
+		Object: obj,
+		Owner:  owner,
+	}
+}
+
+// SetControllerReference sets owner as a Controller OwnerReference on controlled.
+// This is used for garbage collection of the controlled object and for
+// reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner).
+// Since only one OwnerReference can be a controller, it returns an error if
+// there is another OwnerReference with Controller flag set.
+func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error {
+	// Validate the owner.
+ ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + if err := validateOwner(owner, controlled); err != nil { + return err + } + + // Create a new controller ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: pointer.BoolPtr(true), + Controller: pointer.BoolPtr(true), + } + + // Return early with an error if the object is already controlled. + if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { + return newAlreadyOwnedError(controlled, *existing) + } + + // Update owner references and return. + upsertOwnerRef(ref, controlled) + return nil +} + +// SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. +// This allows you to declare that owner has a dependency on the object without specifying it as a controller. +// If a reference to the same object already exists, it'll be overwritten with the newly provided version. +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetOwnerReference", owner) + } + if err := validateOwner(owner, object); err != nil { + return err + } + + // Create a new owner ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + UID: owner.GetUID(), + Name: owner.GetName(), + } + + // Update owner references and return. + upsertOwnerRef(ref, object) + return nil +} + +func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) + } else { + owners[idx] = ref + } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. +func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if referSameObject(r, ref) { + return index + } + } + return -1 +} + +func validateOwner(owner, object metav1.Object) error { + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } + } + return nil +} + +// Returns true if a and b point to the same object. +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name +} + +// OperationResult is the action result of a CreateOrUpdate call. +type OperationResult string + +const ( // They should complete the sentence "Deployment default/foo has been ..." 
+ // OperationResultNone means that the resource has not been changed. + OperationResultNone OperationResult = "unchanged" + // OperationResultCreated means that a new resource is created. + OperationResultCreated OperationResult = "created" + // OperationResultUpdated means that an existing resource is updated. + OperationResultUpdated OperationResult = "updated" + // OperationResultUpdatedStatus means that an existing resource and its status is updated. + OperationResultUpdatedStatus OperationResult = "updatedStatus" + // OperationResultUpdatedStatusOnly means that only an existing status is updated. + OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly" +) + +// CreateOrUpdate creates or updates the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the existing +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + existing := obj.DeepCopyObject() //nolint + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + + if equality.Semantic.DeepEqual(existing, obj) { + return OperationResultNone, nil + } + + if err := c.Update(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultUpdated, nil +} + +// CreateOrPatch creates or patches the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the before +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + // Create patches for the object and its possible status. + objPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + statusPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + + // Create a copy of the original object as well as converting that copy to + // unstructured data. + before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. 
+	if hasBeforeStatus {
+		unstructured.RemoveNestedField(before, "status")
+	}
+
+	// Mutate the original object.
+	if f != nil {
+		if err := mutate(f, key, obj); err != nil {
+			return OperationResultNone, err
+		}
+	}
+
+	// Convert the resource to unstructured to compare against our before copy.
+	after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+	if err != nil {
+		return OperationResultNone, err
+	}
+
+	// Attempt to extract the status from the resource for easier comparison later
+	afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status")
+	if err != nil {
+		return OperationResultNone, err
+	}
+
+	// If the resource contains a status then remove it from the unstructured
+	// copy to avoid unnecessary patching later.
+	if hasAfterStatus {
+		unstructured.RemoveNestedField(after, "status")
+	}
+
+	result := OperationResultNone
+
+	if !reflect.DeepEqual(before, after) {
+		// Only issue a Patch if the before and after resources (minus status) differ
+		if err := c.Patch(ctx, obj, objPatch); err != nil {
+			return result, err
+		}
+		result = OperationResultUpdated
+	}
+
+	if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) {
+		// Only issue a Status Patch if the resource has a status and the beforeStatus
+		// and afterStatus copies differ
+		if result == OperationResultUpdated {
+			// If Status was replaced by Patch before, set it to afterStatus
+			objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+			if err != nil {
+				return result, err
+			}
+			if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil {
+				return result, err
+			}
+			// If Status was replaced by Patch before, restore patched structure to the obj
+			if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil {
+				return result, err
+			}
+		}
+		if err := c.Status().Patch(ctx, obj, statusPatch); err != nil {
+			return result, err
+		}
+		if result == OperationResultUpdated {
+			result = OperationResultUpdatedStatus
+		} else {
+			result = OperationResultUpdatedStatusOnly
+		}
+	}
+
+	return result, nil
+}
+
+// mutate wraps a MutateFn and applies validation to its result.
+func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error {
+	if err := f(); err != nil {
+		return err
+	}
+	if newKey := client.ObjectKeyFromObject(obj); key != newKey {
+		return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace")
+	}
+	return nil
+}
+
+// MutateFn is a function which mutates the existing object into its desired state.
+type MutateFn func() error
+
+// AddFinalizer accepts an Object and adds the provided finalizer if not present.
+// It returns an indication of whether it updated the object's list of finalizers.
+func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) {
+	f := o.GetFinalizers()
+	for _, e := range f {
+		if e == finalizer {
+			return false
+		}
+	}
+	o.SetFinalizers(append(f, finalizer))
+	return true
+}
+
+// RemoveFinalizer accepts an Object and removes the provided finalizer if present.
+// It returns an indication of whether it updated the object's list of finalizers.
+func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) {
+	f := o.GetFinalizers()
+	for i := 0; i < len(f); i++ {
+		if f[i] == finalizer {
+			f = append(f[:i], f[i+1:]...)
+			i--
+			finalizersUpdated = true
+		}
+	}
+	o.SetFinalizers(f)
+	return
+}
+
+// ContainsFinalizer checks whether an Object contains the provided finalizer.
+func ContainsFinalizer(o client.Object, finalizer string) bool { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// Object allows functions to work indistinctly with any resource that +// implements both Object interfaces. +// +// Deprecated: Use client.Object instead. +type Object = client.Object diff --git a/pkg/controller/controllerutil/controllerutil_suite_test.go b/pkg/controller/controllerutil/controllerutil_suite_test.go new file mode 100644 index 0000000000..da4c5cf4ac --- /dev/null +++ b/pkg/controller/controllerutil/controllerutil_suite_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerutil_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestControllerutil(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Controllerutil Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var c client.Client + +var _ = BeforeSuite(func() { + var err error + + testenv = &envtest.Environment{} + + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + c, err = client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/controller/controllerutil/controllerutil_test.go b/pkg/controller/controllerutil/controllerutil_test.go new file mode 100644 index 0000000000..4fd77dc497 --- /dev/null +++ b/pkg/controller/controllerutil/controllerutil_test.go @@ -0,0 +1,837 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerutil_test + +import ( + "context" + "fmt" + "math/rand" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +var _ = Describe("Controllerutil", func() { + Describe("SetOwnerReference", func() { + It("should set ownerRef on an empty list", func() { + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + })) + }) + + It("should not duplicate owner references", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + }, + }, + }, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + })) + }) + + It("should update the reference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid-1", + }, + }, + }, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid-2", + })) + + }) + }) + + Describe("SetControllerReference", func() { + It("should set the OwnerReference if it can find the group version kind", func() { + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + t := true + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + }) + + It("should return an error if it can't find the group version kind of the owner", func() { + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + } + Expect(controllerutil.SetControllerReference(dep, rs, runtime.NewScheme())).To(HaveOccurred()) + }) + + It("should return an error if the owner isn't a runtime.Object", func() { + rs := &appsv1.ReplicaSet{} + Expect(controllerutil.SetControllerReference(&errMetaObj{}, rs, scheme.Scheme)).To(HaveOccurred()) + }) + + It("should return an error if object is already owned by another controller", func() { + t := true + rsOwners := []metav1.OwnerReference{ + { + Name: "bar", + Kind: "Deployment", + 
APIVersion: "extensions/v1beta1", + UID: "bar-uid", + Controller: &t, + BlockOwnerDeletion: &t, + }, + } + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", OwnerReferences: rsOwners}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + err := controllerutil.SetControllerReference(dep, rs, scheme.Scheme) + + Expect(err).To(HaveOccurred()) + Expect(err).To(BeAssignableToTypeOf(&controllerutil.AlreadyOwnedError{})) + }) + + It("should not duplicate existing owner reference", func() { + f := false + t := true + rsOwners := []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &f, + BlockOwnerDeletion: &t, + }, + } + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", OwnerReferences: rsOwners}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + }) + + It("should replace the owner reference if it's already present", func() { + t := true + rsOwners := []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + }, + } + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", OwnerReferences: rsOwners}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + }) + + It("should return an error if it's setting a cross-namespace owner reference", func() { + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "namespace1"}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "namespace2", UID: "foo-uid"}} + + err := controllerutil.SetControllerReference(dep, rs, scheme.Scheme) + + Expect(err).To(HaveOccurred()) + }) + + It("should return an error if it's owner is namespaced resource but dependant is cluster-scoped resource", func() { + pv := &corev1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + err := controllerutil.SetControllerReference(pod, pv, scheme.Scheme) + + Expect(err).To(HaveOccurred()) + }) + + It("should not return any error if the existing owner has a different version", func() { + f := false + t := true + rsOwners := []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid", + Controller: &f, + BlockOwnerDeletion: &t, + }, + } + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", OwnerReferences: rsOwners}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", 
UID: "foo-uid"}} + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + // APIVersion is the new owner's one + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + }) + }) + + Describe("CreateOrUpdate", func() { + var deploy *appsv1.Deployment + var deplSpec appsv1.DeploymentSpec + var deplKey types.NamespacedName + var specr controllerutil.MutateFn + + BeforeEach(func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deploy-%d", rand.Int31()), //nolint:gosec + Namespace: "default", + }, + } + + deplSpec = appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + } + + deplKey = types.NamespacedName{ + Name: deploy.Name, + Namespace: deploy.Namespace, + } + + specr = deploymentSpecr(deploy, deplSpec) + }) + + It("creates a new object if one doesn't exists", func() { + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultCreated") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + + By("actually having the deployment created") + fetched := &appsv1.Deployment{} + Expect(c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + + By("being mutated by MutateFn") + Expect(fetched.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(fetched.Spec.Template.Spec.Containers[0].Name).To(Equal(deplSpec.Template.Spec.Containers[0].Name)) + Expect(fetched.Spec.Template.Spec.Containers[0].Image).To(Equal(deplSpec.Template.Spec.Containers[0].Image)) + }) + + It("updates existing object", func() { + var scale int32 = 2 + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + Expect(err).NotTo(HaveOccurred()) + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + + op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentScaler(deploy, scale)) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdated") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdated)) + + By("actually having the deployment scaled") + fetched := &appsv1.Deployment{} + Expect(c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + Expect(*fetched.Spec.Replicas).To(Equal(scale)) + }) + + It("updates only changed objects", func() { + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentIdentity) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when MutateFn changes object name on creation", func() { + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, func() error { + Expect(specr()).To(Succeed()) + return deploymentRenamer(deploy)() + }) + + By("returning error") + 
Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when MutateFn renames an object", func() { + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentRenamer(deploy)) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when object namespace changes", func() { + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentNamespaceChanger(deploy)) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("aborts immediately if there was an error initially retrieving the object", func() { + op, err := controllerutil.CreateOrUpdate(context.TODO(), errorReader{c}, deploy, func() error { + Fail("Mutation method should not run") + return nil + }) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("CreateOrPatch", func() { + var deploy *appsv1.Deployment + var deplSpec appsv1.DeploymentSpec + var deplKey types.NamespacedName + var specr controllerutil.MutateFn + + BeforeEach(func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deploy-%d", rand.Int31()), //nolint:gosec + Namespace: "default", + }, + } + + deplSpec = appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + } + + deplKey = types.NamespacedName{ + Name: deploy.Name, + Namespace: deploy.Namespace, + } + + specr = deploymentSpecr(deploy, deplSpec) + }) + + assertLocalDeployWasUpdated := func(fetched *appsv1.Deployment) { + By("local deploy object was updated during patch & has same spec, status, resource version as fetched") + if fetched == nil { + fetched = &appsv1.Deployment{} + ExpectWithOffset(1, c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + } + ExpectWithOffset(1, fetched.ResourceVersion).To(Equal(deploy.ResourceVersion)) + ExpectWithOffset(1, fetched.Spec).To(BeEquivalentTo(deploy.Spec)) + ExpectWithOffset(1, fetched.Status).To(BeEquivalentTo(deploy.Status)) + } + + assertLocalDeployStatusWasUpdated := func(fetched *appsv1.Deployment) { + By("local deploy object was updated during patch & has same spec, status, resource version as fetched") + if fetched == nil { + fetched = &appsv1.Deployment{} + ExpectWithOffset(1, c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + } + ExpectWithOffset(1, fetched.ResourceVersion).To(Equal(deploy.ResourceVersion)) + ExpectWithOffset(1, *fetched.Spec.Replicas).To(BeEquivalentTo(int32(5))) + ExpectWithOffset(1, fetched.Status).To(BeEquivalentTo(deploy.Status)) + ExpectWithOffset(1, 
len(fetched.Status.Conditions)).To(BeEquivalentTo(1)) + } + + It("creates a new object if one doesn't exists", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultCreated") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + + By("actually having the deployment created") + fetched := &appsv1.Deployment{} + Expect(c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + + By("being mutated by MutateFn") + Expect(fetched.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(fetched.Spec.Template.Spec.Containers[0].Name).To(Equal(deplSpec.Template.Spec.Containers[0].Name)) + Expect(fetched.Spec.Template.Spec.Containers[0].Image).To(Equal(deplSpec.Template.Spec.Containers[0].Image)) + }) + + It("patches existing object", func() { + var scale int32 = 2 + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + Expect(err).NotTo(HaveOccurred()) + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, deploymentScaler(deploy, scale)) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdated") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdated)) + + By("actually having the deployment scaled") + fetched := &appsv1.Deployment{} + Expect(c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + Expect(*fetched.Spec.Replicas).To(Equal(scale)) + assertLocalDeployWasUpdated(fetched) + }) + + It("patches only changed objects", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, deploymentIdentity) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + + assertLocalDeployWasUpdated(nil) + }) + + It("patches only changed status", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + deployStatus := appsv1.DeploymentStatus{ + ReadyReplicas: 1, + Replicas: 3, + } + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, deploymentStatusr(deploy, deployStatus)) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatusOnly") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatusOnly)) + + assertLocalDeployWasUpdated(nil) + }) + + It("patches resource and status", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + replicas := int32(3) + deployStatus := appsv1.DeploymentStatus{ + ReadyReplicas: 1, + Replicas: replicas, + } + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, func() error { + Expect(deploymentScaler(deploy, replicas)()).To(Succeed()) + return deploymentStatusr(deploy, deployStatus)() + }) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatus") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatus)) + + 
assertLocalDeployWasUpdated(nil) + }) + + It("patches resource and not empty status", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + replicas := int32(3) + deployStatus := appsv1.DeploymentStatus{ + ReadyReplicas: 1, + Replicas: replicas, + } + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, func() error { + Expect(deploymentScaler(deploy, replicas)()).To(Succeed()) + return deploymentStatusr(deploy, deployStatus)() + }) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatus") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatus)) + + assertLocalDeployWasUpdated(nil) + + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, func() error { + deploy.Spec.Replicas = pointer.Int32Ptr(5) + deploy.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }} + return nil + }) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatus") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatus)) + + assertLocalDeployStatusWasUpdated(nil) + }) + + It("errors when MutateFn changes object name on creation", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, func() error { + Expect(specr()).To(Succeed()) + return deploymentRenamer(deploy)() + }) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when MutateFn renames an object", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, deploymentRenamer(deploy)) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when object namespace changes", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrPatch(context.TODO(), c, deploy, deploymentNamespaceChanger(deploy)) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("aborts immediately if there was an error initially retrieving the object", func() { + op, err := controllerutil.CreateOrPatch(context.TODO(), errorReader{c}, deploy, func() error { + Fail("Mutation method should not run") + return nil + }) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("Finalizers", func() { + var deploy *appsv1.Deployment + + Describe("AddFinalizer", func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{}, + }, + } + + It("should add the finalizer when not present", func() { + controllerutil.AddFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + + It("should 
not add the finalizer when already present", func() { + controllerutil.AddFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + + Describe("RemoveFinalizer", func() { + It("should remove finalizer if present", func() { + controllerutil.RemoveFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + + It("should remove all equal finalizers if present", func() { + deploy.SetFinalizers(append(deploy.Finalizers, testFinalizer, testFinalizer)) + controllerutil.RemoveFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + }) + + Describe("AddFinalizer, which returns an indication of whether it modified the object's list of finalizers,", func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{}, + }, + } + + When("the object's list of finalizers has no instances of the input finalizer", func() { + It("should return true", func() { + Expect(controllerutil.AddFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + It("should add the input finalizer to the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + + When("the object's list of finalizers has an instance of the input finalizer", func() { + It("should return false", func() { + Expect(controllerutil.AddFinalizer(deploy, testFinalizer)).To(BeFalse()) + }) + It("should not modify the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + }) + + Describe("RemoveFinalizer, which returns an indication of whether it modified the object's list of finalizers,", func() { + When("the object's list of finalizers has no instances of the input finalizer", func() { + It("should return false", func() { + Expect(controllerutil.RemoveFinalizer(deploy, testFinalizer1)).To(BeFalse()) + }) + It("should not modify the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + + When("the object's list of finalizers has one instance of the input finalizer", func() { + It("should return true", func() { + Expect(controllerutil.RemoveFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + It("should remove the instance of the input finalizer from the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + }) + + When("the object's list of finalizers has multiple instances of the input finalizer", func() { + It("should return true", func() { + deploy.SetFinalizers(append(deploy.Finalizers, testFinalizer, testFinalizer)) + Expect(controllerutil.RemoveFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + It("should remove each instance of the input finalizer from the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + }) + }) + + Describe("ContainsFinalizer", func() { + It("should check that finalizer is present", func() { + controllerutil.AddFinalizer(deploy, testFinalizer) + Expect(controllerutil.ContainsFinalizer(deploy, testFinalizer)).To(Equal(true)) + }) + + It("should check that finalizer is not present after RemoveFinalizer call", func() { + controllerutil.RemoveFinalizer(deploy, testFinalizer) + Expect(controllerutil.ContainsFinalizer(deploy, testFinalizer)).To(Equal(false)) + }) + }) + }) +}) + +const testFinalizer = 
"foo.bar.baz" +const testFinalizer1 = testFinalizer + "1" + +var _ runtime.Object = &errRuntimeObj{} +var _ metav1.Object = &errMetaObj{} + +type errRuntimeObj struct { + runtime.TypeMeta +} + +func (o *errRuntimeObj) DeepCopyObject() runtime.Object { + return &errRuntimeObj{} +} + +type errMetaObj struct { + metav1.ObjectMeta +} + +func deploymentSpecr(deploy *appsv1.Deployment, spec appsv1.DeploymentSpec) controllerutil.MutateFn { + return func() error { + deploy.Spec = spec + return nil + } +} + +func deploymentStatusr(deploy *appsv1.Deployment, status appsv1.DeploymentStatus) controllerutil.MutateFn { + return func() error { + deploy.Status = status + return nil + } +} + +var deploymentIdentity controllerutil.MutateFn = func() error { + return nil +} + +func deploymentRenamer(deploy *appsv1.Deployment) controllerutil.MutateFn { + return func() error { + deploy.Name = fmt.Sprintf("%s-1", deploy.Name) + return nil + } +} + +func deploymentNamespaceChanger(deploy *appsv1.Deployment) controllerutil.MutateFn { + return func() error { + deploy.Namespace = fmt.Sprintf("%s-1", deploy.Namespace) + return nil + } +} + +func deploymentScaler(deploy *appsv1.Deployment, replicas int32) controllerutil.MutateFn { + fn := func() error { + deploy.Spec.Replicas = &replicas + return nil + } + return fn +} + +type errorReader struct { + client.Client +} + +func (e errorReader) Get(ctx context.Context, key client.ObjectKey, into client.Object, opts ...client.GetOption) error { + return fmt.Errorf("unexpected error") +} diff --git a/pkg/controller/controllerutil/doc.go b/pkg/controller/controllerutil/doc.go new file mode 100644 index 0000000000..ab386b29cd --- /dev/null +++ b/pkg/controller/controllerutil/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controllerutil contains utility functions for working with and implementing Controllers. +*/ +package controllerutil diff --git a/pkg/controller/controllerutil/example_test.go b/pkg/controller/controllerutil/example_test.go new file mode 100644 index 0000000000..b2d6f71a5c --- /dev/null +++ b/pkg/controller/controllerutil/example_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllerutil_test + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + log = logf.Log.WithName("controllerutil-examples") +) + +// This example creates or updates an existing deployment. +func ExampleCreateOrUpdate() { + // c is client.Client + + // Create or Update the deployment default/foo + deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} + + op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, func() error { + // Deployment selector is immutable so we set this value only if + // a new object is going to be created + if deploy.ObjectMeta.CreationTimestamp.IsZero() { + deploy.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + } + } + + // update the Deployment pod template + deploy.Spec.Template = corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + } + + return nil + }) + + if err != nil { + log.Error(err, "Deployment reconcile failed") + } else { + log.Info("Deployment successfully reconciled", "operation", op) + } +} diff --git a/pkg/controller/doc.go b/pkg/controller/doc.go new file mode 100644 index 0000000000..228335e929 --- /dev/null +++ b/pkg/controller/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controller provides types and functions for building Controllers. Controllers implement Kubernetes APIs. + +# Creation + +To create a new Controller, first create a manager.Manager and pass it to the controller.New function. +The Controller MUST be started by calling Manager.Start. +*/ +package controller diff --git a/pkg/controller/example_test.go b/pkg/controller/example_test.go new file mode 100644 index 0000000000..3d8e399703 --- /dev/null +++ b/pkg/controller/example_test.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller_test + +import ( + "context" + "os" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var ( + mgr manager.Manager + // NB: don't call SetLogger in init(), or else you'll mess up logging in the main suite. + log = logf.Log.WithName("controller-examples") +) + +// This example creates a new Controller named "pod-controller" with a no-op reconcile function. The +// manager.Manager will be used to Start the Controller, and will provide it a shared Cache and Client. +func ExampleNew() { + _, err := controller.New("pod-controller", mgr, controller.Options{ + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + // Your business logic to implement the API by creating, updating, deleting objects goes here. + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "unable to create pod-controller") + os.Exit(1) + } +} + +// This example starts a new Controller named "pod-controller" to Watch Pods and call a no-op Reconciler. +func ExampleController() { + // mgr is a manager.Manager + + // Create a new Controller that will call the provided Reconciler function in response + // to events. + c, err := controller.New("pod-controller", mgr, controller.Options{ + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + // Your business logic to implement the API by creating, updating, deleting objects goes here. + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "unable to create pod-controller") + os.Exit(1) + } + + // Watch for Pod create / update / delete events and call Reconcile + err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + log.Error(err, "unable to watch pods") + os.Exit(1) + } + + // Start the Controller through the manager. + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "unable to continue running manager") + os.Exit(1) + } +} + +// This example starts a new Controller named "pod-controller" to Watch Pods with the unstructured object and call a no-op Reconciler. +func ExampleController_unstructured() { + // mgr is a manager.Manager + + // Create a new Controller that will call the provided Reconciler function in response + // to events. + c, err := controller.New("pod-controller", mgr, controller.Options{ + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + // Your business logic to implement the API by creating, updating, deleting objects goes here. + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "unable to create pod-controller") + os.Exit(1) + } + + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Kind: "Pod", + Group: "", + Version: "v1", + }) + // Watch for Pod create / update / delete events and call Reconcile + err = c.Watch(&source.Kind{Type: u}, &handler.EnqueueRequestForObject{}) + if err != nil { + log.Error(err, "unable to watch pods") + os.Exit(1) + } + + // Start the Controller through the manager. 
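	// Note (added for clarity): Start blocks until the context returned by the
	// signal handler is cancelled (e.g. on SIGINT/SIGTERM), so it is typically
	// the last call made by the program.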
+ if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "unable to continue running manager") + os.Exit(1) + } +} + +// This example creates a new controller named "pod-controller" to watch Pods +// and call a no-op reconciler. The controller is not added to the provided +// manager, and must thus be started and stopped by the caller. +func ExampleNewUnmanaged() { + // mgr is a manager.Manager + + // Configure creates a new controller but does not add it to the supplied + // manager. + c, err := controller.NewUnmanaged("pod-controller", mgr, controller.Options{ + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "unable to create pod-controller") + os.Exit(1) + } + + if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}); err != nil { + log.Error(err, "unable to watch pods") + os.Exit(1) + } + + ctx, cancel := context.WithCancel(context.Background()) + + // Start our controller in a goroutine so that we do not block. + go func() { + // Block until our controller manager is elected leader. We presume our + // entire process will terminate if we lose leadership, so we don't need + // to handle that. + <-mgr.Elected() + + // Start our controller. This will block until the context is + // closed, or the controller returns an error. + if err := c.Start(ctx); err != nil { + log.Error(err, "cannot run experiment controller") + } + }() + + // Stop our controller. + cancel() +} diff --git a/pkg/controller/testdata/crds/unconventionallisttype.yaml b/pkg/controller/testdata/crds/unconventionallisttype.yaml new file mode 100644 index 0000000000..3069c473e5 --- /dev/null +++ b/pkg/controller/testdata/crds/unconventionallisttype.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: unconventionallisttypes.chaosapps.metamagical.io +spec: + group: chaosapps.metamagical.io + names: + kind: UnconventionalListType + plural: unconventionallisttypes + scope: Namespaced + versions: + - name: "v1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/conversion/conversion.go b/pkg/conversion/conversion.go new file mode 100644 index 0000000000..da32ab48e4 --- /dev/null +++ b/pkg/conversion/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides interface definitions that an API Type needs to +implement for it to be supported by the generic conversion webhook handler +defined under pkg/webhook/conversion. +*/ +package conversion + +import "k8s.io/apimachinery/pkg/runtime" + +// Convertible defines capability of a type to convertible i.e. it can be converted to/from a hub type. +type Convertible interface { + runtime.Object + ConvertTo(dst Hub) error + ConvertFrom(src Hub) error +} + +// Hub marks that a given type is the hub type for conversion. 
This means that +// all conversions will first convert to the hub type, then convert from the hub +// type to the destination type. All types besides the hub type should implement +// Convertible. +type Hub interface { + runtime.Object + Hub() +} diff --git a/pkg/doc.go b/pkg/doc.go new file mode 100644 index 0000000000..89b380c108 --- /dev/null +++ b/pkg/doc.go @@ -0,0 +1,207 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package pkg provides libraries for building Controllers. Controllers implement Kubernetes APIs +and are foundational to building Operators, Workload APIs, Configuration APIs, Autoscalers, and more. + +# Client + +Client provides a Read + Write client for reading and writing Kubernetes objects. + +# Cache + +Cache provides a Read client for reading objects from a local cache. +A cache may register handlers to respond to events that update the cache. + +# Manager + +Manager is required for creating a Controller and provides the Controller shared dependencies such as +clients, caches, schemes, etc. Controllers should be Started through the Manager by calling Manager.Start. + +# Controller + +Controller implements a Kubernetes API by responding to events (object Create, Update, Delete) and ensuring that +the state specified in the Spec of the object matches the state of the system. This is called a reconcile. +If they do not match, the Controller will create / update / delete objects as needed to make them match. + +Controllers are implemented as worker queues that process reconcile.Requests (requests to reconcile the +state for a specific object). + +Unlike http handlers, Controllers DO NOT handle events directly, but enqueue Requests to eventually reconcile +the object. This means the handling of multiple events may be batched together and the full state of the +system must be read for each reconcile. + +* Controllers require a Reconciler to be provided to perform the work pulled from the work queue. + +* Controllers require Watches to be configured to enqueue reconcile.Requests in response to events. + +# Webhook + +Admission Webhooks are a mechanism for extending kubernetes APIs. Webhooks can be configured with target +event type (object Create, Update, Delete), the API server will send AdmissionRequests to them +when certain events happen. The webhooks may mutate and (or) validate the object embedded in +the AdmissionReview requests and send back the response to the API server. + +There are 2 types of admission webhook: mutating and validating admission webhook. +Mutating webhook is used to mutate a core API object or a CRD instance before the API server admits it. +Validating webhook is used to validate if an object meets certain requirements. + +* Admission Webhooks require Handler(s) to be provided to process the received AdmissionReview requests. + +# Reconciler + +Reconciler is a function provided to a Controller that may be called at anytime with the Name and Namespace of an object. 
+When called, the Reconciler will ensure that the state of the system matches what is specified in the object at the +time the Reconciler is called. + +Example: Reconciler invoked for a ReplicaSet object. The ReplicaSet specifies 5 replicas but only +3 Pods exist in the system. The Reconciler creates 2 more Pods and sets their OwnerReference to point at the +ReplicaSet with controller=true. + +* Reconciler contains all of the business logic of a Controller. + +* Reconciler typically works on a single object type. - e.g. it will only reconcile ReplicaSets. For separate +types use separate Controllers. If you wish to trigger reconciles from other objects, you can provide +a mapping (e.g. owner references) that maps the object that triggers the reconcile to the object being reconciled. + +* Reconciler is provided the Name / Namespace of the object to reconcile. + +* Reconciler does not care about the event contents or event type responsible for triggering the reconcile. +- e.g. it doesn't matter whether a ReplicaSet was created or updated, Reconciler will always compare the number of +Pods in the system against what is specified in the object at the time it is called. + +# Source + +resource.Source is an argument to Controller.Watch that provides a stream of events. +Events typically come from watching Kubernetes APIs (e.g. Pod Create, Update, Delete). + +Example: source.Kind uses the Kubernetes API Watch endpoint for a GroupVersionKind to provide +Create, Update, Delete events. + +* Source provides a stream of events (e.g. object Create, Update, Delete) for Kubernetes objects typically +through the Watch API. + +* Users SHOULD only use the provided Source implementations instead of implementing their own for nearly all cases. + +# EventHandler + +handler.EventHandler is an argument to Controller.Watch that enqueues reconcile.Requests in response to events. + +Example: a Pod Create event from a Source is provided to the eventhandler.EnqueueHandler, which enqueues a +reconcile.Request containing the name / Namespace of the Pod. + +* EventHandlers handle events by enqueueing reconcile.Requests for one or more objects. + +* EventHandlers MAY map an event for an object to a reconcile.Request for an object of the same type. + +* EventHandlers MAY map an event for an object to a reconcile.Request for an object of a different type - e.g. +map a Pod event to a reconcile.Request for the owning ReplicaSet. + +* EventHandlers MAY map an event for an object to multiple reconcile.Requests for objects of the same or a different +type - e.g. map a Node event to objects that respond to cluster resize events. + +* Users SHOULD only use the provided EventHandler implementations instead of implementing their own for almost +all cases. + +# Predicate + +predicate.Predicate is an optional argument to Controller.Watch that filters events. This allows common filters to be +reused and composed. + +* Predicate takes an event and returns a bool (true to enqueue) + +* Predicates are optional arguments + +* Users SHOULD use the provided Predicate implementations, but MAY implement additional +Predicates e.g. generation changed, label selectors changed etc. 
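
For illustration only, a minimal sketch of a Predicate built from predicate.Funcs that enqueues
only when an object's generation changes (the bundled predicate.GenerationChangedPredicate
provides the same behaviour ready-made):

	skipStatusOnlyUpdates := predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			// The generation changes only when the spec changes, so pure
			// status updates are filtered out and never enqueued.
			return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
		},
	}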
+ +# PodController Diagram + +Source provides event: + +* &source.Kind{Type: &v1.Pod{}} -> (Pod foo/bar Create Event) + +EventHandler enqueues Request: + +* &handler.EnqueueRequestForObject{} -> (reconcile.Request{types.NamespacedName{Name: "foo", Namespace: "bar"}}) + +Reconciler is called with the Request: + +* Reconciler(reconcile.Request{types.NamespacedName{Name: "foo", Namespace: "bar"}}) + +# Usage + +The following example shows creating a new Controller program which Reconciles ReplicaSet objects in response +to Pod or ReplicaSet events. The Reconciler function simply adds a label to the ReplicaSet. + +See the examples/builtins/main.go for a usage example. + +Controller Example: + +1. Watch ReplicaSet and Pods Sources + +1.1 ReplicaSet -> handler.EnqueueRequestForObject - enqueue a Request with the ReplicaSet Namespace and Name. + +1.2 Pod (created by ReplicaSet) -> handler.EnqueueRequestForOwner - enqueue a Request with the +Owning ReplicaSet Namespace and Name. + +2. Reconcile ReplicaSet in response to an event + +2.1 ReplicaSet object created -> Read ReplicaSet, try to read Pods -> if they are missing, create Pods. + +2.2 Reconciler triggered by creation of Pods -> Read ReplicaSet and Pods, do nothing. + +2.3 Reconciler triggered by deletion of Pods from some other actor -> Read ReplicaSet and Pods, create replacement Pods. + +# Watching and EventHandling + +Controllers may Watch multiple Kinds of objects (e.g. Pods, ReplicaSets and Deployments), but they reconcile +only a single Type. When one Type of object must be updated in response to changes in another Type of object, +an EnqueueRequestsFromMapFunc may be used to map events from one type to another, e.g. to respond to a cluster resize +event (add / delete Node) by re-reconciling all instances of some API. + +A Deployment Controller might use an EnqueueRequestForObject and EnqueueRequestForOwner to: + +* Watch for Deployment Events - enqueue the Namespace and Name of the Deployment. + +* Watch for ReplicaSet Events - enqueue the Namespace and Name of the Deployment that created the ReplicaSet +(e.g. the Owner). + +Note: reconcile.Requests are deduplicated when they are enqueued. Many Pod Events for the same ReplicaSet +may trigger only 1 reconcile invocation as each Event results in the Handler trying to enqueue +the same reconcile.Request for the ReplicaSet. + +# Controller Writing Tips + +Reconciler Runtime Complexity: + +* It is better to write Controllers to perform an O(1) reconcile N times (e.g. on N different objects) instead of +performing an O(N) reconcile 1 time (e.g. on a single object which manages N other objects). + +* Example: If you need to update all Services in response to a Node being added - reconcile Services but Watch +Nodes (transformed to Service object name / Namespaces) instead of Reconciling Nodes and updating Services. + +Event Multiplexing: + +* reconcile.Requests for the same Name / Namespace are batched and deduplicated when they are enqueued. This allows +Controllers to gracefully handle a high volume of events for a single object. Multiplexing multiple event Sources to +a single object Type will batch requests across events for different object types. + +* Example: Pod events for a ReplicaSet are transformed to a ReplicaSet Name / Namespace, so the ReplicaSet +will be Reconciled only 1 time for multiple events from multiple Pods. 
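
To make the Nodes-to-Services tip above concrete, here is a hedged sketch of wiring such a
watch with EnqueueRequestsFromMapFunc; requestsForAllServices is a hypothetical helper that
lists the Services and returns one reconcile.Request per Service:

	err := c.Watch(&source.Kind{Type: &corev1.Node{}},
		handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
			// Every Node event re-enqueues all Services; duplicate requests are
			// collapsed in the workqueue before the Reconciler runs.
			return requestsForAllServices()
		}))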
+*/ +package pkg diff --git a/pkg/envtest/crd.go b/pkg/envtest/crd.go new file mode 100644 index 0000000000..3b52ae8f99 --- /dev/null +++ b/pkg/envtest/crd.go @@ -0,0 +1,456 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// CRDInstallOptions are the options for installing CRDs. +type CRDInstallOptions struct { + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Paths is a list of paths to the directories or files containing CRDs + Paths []string + + // CRDs is a list of CRDs to install + CRDs []*apiextensionsv1.CustomResourceDefinition + + // ErrorIfPathMissing will cause an error if a Path does not exist + ErrorIfPathMissing bool + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration + + // CleanUpAfterUse will cause the CRDs listed for installation to be + // uninstalled when terminating the test environment. + // Defaults to false. + CleanUpAfterUse bool + + // WebhookOptions contains the conversion webhook information to install + // on the CRDs. This field is usually inherited by the EnvTest options. + // + // If you're passing this field manually, you need to make sure that + // the CA information and host port is filled in properly. + WebhookOptions WebhookInstallOptions +} + +const defaultPollInterval = 100 * time.Millisecond +const defaultMaxWait = 10 * time.Second + +// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory. 
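// A rough usage sketch from a test in another package follows; cfg is an assumed
// *rest.Config and the "config/crd/bases" path is only illustrative:
//
//	crds, err := envtest.InstallCRDs(cfg, envtest.CRDInstallOptions{
//		Paths: []string{filepath.Join("config", "crd", "bases")},
//	})
//	if err != nil {
//		t.Fatal(err)
//	}
//	_ = crds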
+func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + defaultCRDOptions(&options) + + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return nil, fmt.Errorf("unable to read CRD files: %w", err) + } + + if err := modifyConversionWebhooks(options.CRDs, options.Scheme, options.WebhookOptions); err != nil { + return nil, err + } + + // Create the CRDs in the apiserver + if err := CreateCRDs(config, options.CRDs); err != nil { + return options.CRDs, fmt.Errorf("unable to create CRD instances: %w", err) + } + + // Wait for the CRDs to appear as Resources in the apiserver + if err := WaitForCRDs(config, options.CRDs, options); err != nil { + return options.CRDs, fmt.Errorf("something went wrong waiting for CRDs to appear as API resources: %w", err) + } + + return options.CRDs, nil +} + +// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs. +func readCRDFiles(options *CRDInstallOptions) error { + if len(options.Paths) > 0 { + crdList, err := renderCRDs(options) + if err != nil { + return err + } + + options.CRDs = append(options.CRDs, crdList...) + } + return nil +} + +// defaultCRDOptions sets the default values for CRDs. +func defaultCRDOptions(o *CRDInstallOptions) { + if o.Scheme == nil { + o.Scheme = scheme.Scheme + } + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval + } +} + +// WaitForCRDs waits for the CRDs to appear in discovery. +func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition, options CRDInstallOptions) error { + // Add each CRD to a map of GroupVersion to Resource + waitingFor := map[schema.GroupVersion]*sets.String{} + for _, crd := range crds { + gvs := []schema.GroupVersion{} + for _, version := range crd.Spec.Versions { + if version.Served { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}) + } + } + + for _, gv := range gvs { + log.V(1).Info("adding API in waitlist", "GV", gv) + if _, found := waitingFor[gv]; !found { + // Initialize the set + waitingFor[gv] = &sets.String{} + } + // Add the Resource + waitingFor[gv].Insert(crd.Spec.Names.Plural) + } + } + + // Poll until all resources are found in discovery + p := &poller{config: config, waitingFor: waitingFor} + return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type poller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersion]*sets.String +} + +// poll checks if all the resources have been found in discovery, and returns false if not. +func (p *poller) poll() (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + cs, err := clientset.NewForConfig(p.config) + if err != nil { + return false, err + } + + allFound := true + for gv, resources := range p.waitingFor { + // All resources found, do nothing + if resources.Len() == 0 { + delete(p.waitingFor, gv) + continue + } + + // Get the Resources for this GroupVersion + // TODO: Maybe the controller-runtime client should be able to do this... 
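		// Discovery may transiently fail or lag while the new APIs register, so an
		// error here is treated as "not ready yet" and the poll simply runs again.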
+ resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version) + if err != nil { + return false, nil //nolint:nilerr + } + + // Remove each found resource from the resources set that we are waiting for + for _, resource := range resourceList.APIResources { + resources.Delete(resource.Name) + } + + // Still waiting on some resources in this group version + if resources.Len() != 0 { + allFound = false + } + } + return allFound, nil +} + +// UninstallCRDs uninstalls a collection of CRDs by reading the crd yaml files from a directory. +func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error { + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return err + } + + // Delete the CRDs from the apiserver + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Uninstall each CRD + for _, crd := range options.CRDs { + crd := crd + log.V(1).Info("uninstalling CRD", "crd", crd.GetName()) + if err := cs.Delete(context.TODO(), crd); err != nil { + // If CRD is not found, we can consider success + if !apierrors.IsNotFound(err) { + return err + } + } + } + + return nil +} + +// CreateCRDs creates the CRDs. +func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + + // Create each CRD + for _, crd := range crds { + crd := crd + log.V(1).Info("installing CRD", "crd", crd.GetName()) + existingCrd := crd.DeepCopy() + err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.TODO(), crd); err != nil { + return fmt.Errorf("unable to create CRD %q: %w", crd.GetName(), err) + } + case err != nil: + return fmt.Errorf("unable to get CRD %q to check if it exists: %w", crd.GetName(), err) + default: + log.V(1).Info("CRD already exists, updating", "crd", crd.GetName()) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd); err != nil { + return err + } + crd.SetResourceVersion(existingCrd.GetResourceVersion()) + return cs.Update(context.TODO(), crd) + }); err != nil { + return err + } + } + } + return nil +} + +// renderCRDs iterate through options.Paths and extract all CRD files. 
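// Definitions that share the same GroupVersionKind and name across the supplied paths
// are collapsed into a single entry, keeping whichever definition was read last.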
+func renderCRDs(options *CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + type GVKN struct { + GVK schema.GroupVersionKind + Name string + } + + crds := map[GVKN]*apiextensionsv1.CustomResourceDefinition{} + + for _, path := range options.Paths { + var ( + err error + info os.FileInfo + files []string + filePath = path + ) + + // Return the error if ErrorIfPathMissing exists + if info, err = os.Stat(path); os.IsNotExist(err) { + if options.ErrorIfPathMissing { + return nil, err + } + continue + } + + if !info.IsDir() { + filePath, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + log.V(1).Info("reading CRDs from path", "path", path) + crdList, err := readCRDs(filePath, files) + if err != nil { + return nil, err + } + + for i, crd := range crdList { + gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()} + if _, found := crds[gvkn]; found { + // Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense. + log.Info("there are more than one CRD definitions with the same ", "GVKN", gvkn) + } + // We always use the CRD definition that we found last. + crds[gvkn] = crdList[i] + } + } + + // Converting map to a list to return + res := []*apiextensionsv1.CustomResourceDefinition{} + for _, obj := range crds { + res = append(res, obj) + } + return res, nil +} + +// modifyConversionWebhooks takes all the registered CustomResourceDefinitions and applies modifications +// to conditionally enable webhooks if the type is registered within the scheme. +func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, scheme *runtime.Scheme, webhookOptions WebhookInstallOptions) error { + if len(webhookOptions.LocalServingCAData) == 0 { + return nil + } + + // Determine all registered convertible types. + convertibles := map[schema.GroupKind]struct{}{} + for gvk := range scheme.AllKnownTypes() { + obj, err := scheme.New(gvk) + if err != nil { + return err + } + if ok, err := conversion.IsConvertible(scheme, obj); ok && err == nil { + convertibles[gvk.GroupKind()] = struct{}{} + } + } + + // generate host port. + hostPort, err := webhookOptions.generateHostPort() + if err != nil { + return err + } + url := pointer.StringPtr(fmt.Sprintf("https://%s/convert", hostPort)) + + for i := range crds { + // Continue if we're preserving unknown fields. + if crds[i].Spec.PreserveUnknownFields { + continue + } + // Continue if the GroupKind isn't registered as being convertible. + if _, ok := convertibles[schema.GroupKind{ + Group: crds[i].Spec.Group, + Kind: crds[i].Spec.Names.Kind, + }]; !ok { + continue + } + if crds[i].Spec.Conversion == nil { + crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ + Webhook: &apiextensionsv1.WebhookConversion{}, + } + } + crds[i].Spec.Conversion.Strategy = apiextensionsv1.WebhookConverter + crds[i].Spec.Conversion.Webhook.ConversionReviewVersions = []string{"v1", "v1beta1"} + crds[i].Spec.Conversion.Webhook.ClientConfig = &apiextensionsv1.WebhookClientConfig{ + Service: nil, + URL: url, + CABundle: webhookOptions.LocalServingCAData, + } + } + + return nil +} + +// readCRDs reads the CRDs from files and Unmarshals them into structs. 
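// Only .json, .yaml and .yml files are considered, and any document that does not
// decode into a CustomResourceDefinition (wrong kind, or empty group / kind names)
// is skipped.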
+func readCRDs(basePath string, files []string) ([]*apiextensionsv1.CustomResourceDefinition, error) { + var crds []*apiextensionsv1.CustomResourceDefinition + + // White list the file extensions that may contain CRDs + crdExts := sets.NewString(".json", ".yaml", ".yml") + + for _, file := range files { + // Only parse allowlisted file types + if !crdExts.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal CRDs from file into structs + docs, err := readDocuments(filepath.Join(basePath, file)) + if err != nil { + return nil, err + } + + for _, doc := range docs { + crd := &apiextensionsv1.CustomResourceDefinition{} + if err = yaml.Unmarshal(doc, crd); err != nil { + return nil, err + } + + if crd.Kind != "CustomResourceDefinition" || crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { + continue + } + crds = append(crds, crd) + } + + log.V(1).Info("read CRDs from file", "file", file) + } + return crds, nil +} + +// readDocuments reads documents from file. +func readDocuments(fp string) ([][]byte, error) { + b, err := os.ReadFile(fp) + if err != nil { + return nil, err + } + + docs := [][]byte{} + reader := k8syaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b))) + for { + // Read document + doc, err := reader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return nil, err + } + + docs = append(docs, doc) + } + + return docs, nil +} diff --git a/pkg/envtest/crd_test.go b/pkg/envtest/crd_test.go new file mode 100644 index 0000000000..2c12ba57b4 --- /dev/null +++ b/pkg/envtest/crd_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/sets" +) + +var _ = Describe("Test", func() { + Describe("readCRDFiles", func() { + It("should not mix up files from different directories", func() { + opt := CRDInstallOptions{ + Paths: []string{ + "testdata/crds", + "testdata/crdv1_original", + }, + } + err := readCRDFiles(&opt) + Expect(err).NotTo(HaveOccurred()) + + expectedCRDs := sets.NewString( + "frigates.ship.example.com", + "configs.foo.example.com", + "drivers.crew.example.com", + ) + + foundCRDs := sets.NewString() + for _, crd := range opt.CRDs { + foundCRDs.Insert(crd.Name) + } + + Expect(expectedCRDs).To(Equal(foundCRDs)) + }) + }) +}) diff --git a/pkg/envtest/doc.go b/pkg/envtest/doc.go new file mode 100644 index 0000000000..412e794cc8 --- /dev/null +++ b/pkg/envtest/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envtest provides libraries for integration testing by starting a local control plane +// +// Control plane binaries (etcd and kube-apiserver) are loaded by default from +// /usr/local/kubebuilder/bin. This can be overridden by setting the +// KUBEBUILDER_ASSETS environment variable, or by directly creating a +// ControlPlane for the Environment to use. +// +// Environment can also be configured to work with an existing cluster, and +// simply load CRDs and provide client configuration. +package envtest diff --git a/pkg/envtest/envtest_suite_test.go b/pkg/envtest/envtest_suite_test.go new file mode 100644 index 0000000000..0d5bb0eae2 --- /dev/null +++ b/pkg/envtest/envtest_suite_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + admissionv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Envtest Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var env *Environment + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + env = &Environment{} + // we're initializing webhook here and not in webhook.go to also test the envtest install code via WebhookOptions + initializeWebhookInEnvironment() + _, err := env.Start() + Expect(err).NotTo(HaveOccurred()) +}, StartTimeout) + +func initializeWebhookInEnvironment() { + namespacedScopeV1 := admissionv1.NamespacedScope + failedTypeV1 := admissionv1.Fail + equivalentTypeV1 := admissionv1.Equivalent + noSideEffectsV1 := admissionv1.SideEffectClassNone + webhookPathV1 := "/failing" + + env.WebhookInstallOptions = WebhookInstallOptions{ + ValidatingWebhooks: []*admissionv1.ValidatingWebhookConfiguration{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-validation-webhook-config", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: "admissionregistration.k8s.io/v1", + }, + Webhooks: []admissionv1.ValidatingWebhook{ + { + Name: "deployment-validation.kubebuilder.io", + Rules: []admissionv1.RuleWithOperations{ + { + Operations: []admissionv1.OperationType{"CREATE", "UPDATE"}, + Rule: admissionv1.Rule{ + APIGroups: []string{"apps"}, + APIVersions: []string{"v1"}, + Resources: []string{"deployments"}, + Scope: &namespacedScopeV1, + }, + }, + }, + FailurePolicy: &failedTypeV1, + MatchPolicy: &equivalentTypeV1, + SideEffects: &noSideEffectsV1, + ClientConfig: admissionv1.WebhookClientConfig{ + Service: &admissionv1.ServiceReference{ + Name: 
"deployment-validation-service", + Namespace: "default", + Path: &webhookPathV1, + }, + }, + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-validation-webhook-config", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: "admissionregistration.k8s.io/v1", + }, + Webhooks: []admissionv1.ValidatingWebhook{ + { + Name: "deployment-validation.kubebuilder.io", + Rules: []admissionv1.RuleWithOperations{ + { + Operations: []admissionv1.OperationType{"CREATE", "UPDATE"}, + Rule: admissionv1.Rule{ + APIGroups: []string{"apps"}, + APIVersions: []string{"v1"}, + Resources: []string{"deployments"}, + Scope: &namespacedScopeV1, + }, + }, + }, + FailurePolicy: &failedTypeV1, + MatchPolicy: &equivalentTypeV1, + SideEffects: &noSideEffectsV1, + ClientConfig: admissionv1.WebhookClientConfig{ + Service: &admissionv1.ServiceReference{ + Name: "deployment-validation-service", + Namespace: "default", + Path: &webhookPathV1, + }, + }, + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, + }, + } +} + +var _ = AfterSuite(func() { + Expect(env.Stop()).NotTo(HaveOccurred()) +}, StopTimeout) diff --git a/pkg/envtest/envtest_test.go b/pkg/envtest/envtest_test.go new file mode 100644 index 0000000000..11eedca0ac --- /dev/null +++ b/pkg/envtest/envtest_test.go @@ -0,0 +1,961 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Test", func() { + var crds []*apiextensionsv1.CustomResourceDefinition + var err error + var s *runtime.Scheme + var c client.Client + + var validDirectory = filepath.Join(".", "testdata") + var invalidDirectory = "fake" + + var teardownTimeoutSeconds float64 = 10 + + // Initialize the client + BeforeEach(func() { + crds = []*apiextensionsv1.CustomResourceDefinition{} + s = scheme.Scheme + err = apiextensionsv1.AddToScheme(s) + Expect(err).NotTo(HaveOccurred()) + + c, err = client.New(env.Config, client.Options{Scheme: s}) + Expect(err).NotTo(HaveOccurred()) + }) + + // Cleanup CRDs + AfterEach(func() { + for _, crd := range crds { + crd := crd + // Delete only if CRD exists. + crdObjectKey := client.ObjectKey{ + Name: crd.GetName(), + } + var placeholder apiextensionsv1.CustomResourceDefinition + if err = c.Get(context.TODO(), crdObjectKey, &placeholder); err != nil && + apierrors.IsNotFound(err) { + // CRD doesn't need to be deleted. 
+ continue + } + Expect(err).NotTo(HaveOccurred()) + Expect(c.Delete(context.TODO(), crd)).To(Succeed()) + Eventually(func() bool { + err := c.Get(context.TODO(), crdObjectKey, &placeholder) + return apierrors.IsNotFound(err) + }, 5*time.Second).Should(BeTrue()) + } + }, teardownTimeoutSeconds) + + Describe("InstallCRDs", func() { + It("should install the unserved CRDs into the cluster", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata", "crds", "examplecrd_unserved.yaml")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "frigates.ship.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Frigate")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "ship.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "frigates", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: false, + }, + { + Name: "v1beta1", + Storage: false, + Served: false, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }) + It("should install the CRDs into the cluster using directory", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{validDirectory}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: 
&apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }, 5) + + It("should install the CRDs into the cluster using file", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata", "crds", "examplecrd3.yaml")}, + }) + Expect(err).NotTo(HaveOccurred()) + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "configs.foo.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Config")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "foo.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "configs", + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }, 10) + + It("should be able to install CRDs using multiple files", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{ + filepath.Join(".", "testdata", "examplecrd.yaml"), + filepath.Join(".", "testdata", "examplecrd_v1.yaml"), + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(crds).To(HaveLen(2)) + }, 10) + + It("should filter out already existent CRD", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{ + filepath.Join(".", "testdata"), + filepath.Join(".", "testdata", "examplecrd1.yaml"), + }, + }) + Expect(err).NotTo(HaveOccurred()) + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + err = WaitForCRDs(env.Config, 
[]*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }, 10) + + It("should not return an not error if the directory doesn't exist", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{Paths: []string{invalidDirectory}}) + Expect(err).NotTo(HaveOccurred()) + }, 5) + + It("should return an error if the directory doesn't exist", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{invalidDirectory}, ErrorIfPathMissing: true, + }) + Expect(err).To(HaveOccurred()) + }, 5) + + It("should return an error if the file doesn't exist", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{Paths: []string{ + filepath.Join(".", "testdata", "fake.yaml")}, ErrorIfPathMissing: true, + }) + Expect(err).To(HaveOccurred()) + }, 5) + + It("should return an error if the resource group version isn't found", func() { + // Wait for a CRD where the Group and Version don't exist + err := WaitForCRDs(env.Config, + []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "notfound", + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).To(HaveOccurred()) + }, 5) + + It("should return an error if the resource isn't found in the group version", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{"."}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Wait for a CRD that doesn't exist, but the Group and Version do + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "fake", + }}, + }}, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).To(HaveOccurred()) + }, 5) + + It("should reinstall the CRDs if already present in the cluster", func() { + + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: 
[]string{filepath.Join(".", "testdata")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + + // Try to re-install the CRDs + + 
crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + 
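The specs above and below drive the exported CRD helpers end to end. For orientation, a minimal consumer-side sketch of the same flow, not part of this change; the manifest directory and timeouts are assumptions for illustration:

package crdsetup // hypothetical helper, not part of this change

import (
    "time"

    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
)

// installAndWait installs every CRD found under crdDir into the cluster behind
// cfg and blocks until discovery reports the new resources as served.
func installAndWait(cfg *rest.Config, crdDir string) error {
    crds, err := envtest.InstallCRDs(cfg, envtest.CRDInstallOptions{
        Paths: []string{crdDir},
    })
    if err != nil {
        return err
    }
    // WaitForCRDs polls discovery at PollInterval until MaxTime elapses.
    return envtest.WaitForCRDs(cfg, crds, envtest.CRDInstallOptions{
        MaxTime:      10 * time.Second,
        PollInterval: 100 * time.Millisecond,
    })
}

// cleanup removes the same CRDs again, mirroring the UninstallCRDs specs further down.
func cleanup(cfg *rest.Config, crdDir string) error {
    return envtest.UninstallCRDs(cfg, envtest.CRDInstallOptions{
        Paths: []string{crdDir},
    })
}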
Expect(err).NotTo(HaveOccurred()) + }, 5) + }) + + It("should update CRDs if already present in the cluster", func() { + + // Install only the CRDv1 multi-version example + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + Expect(len(crd.Spec.Versions)).To(BeEquivalentTo(2)) + + // Store resource version for comparison later on + firstRV := crd.ResourceVersion + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + + // Add one more version and update + _, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata", "crdv1_updated")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find updated CRD + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + Expect(len(crd.Spec.Versions)).To(BeEquivalentTo(3)) + Expect(crd.ResourceVersion).NotTo(BeEquivalentTo(firstRV)) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + { + Name: "v3", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }, 5) + + Describe("UninstallCRDs", func() { + It("should uninstall the CRDs from the cluster", func() { + + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{validDirectory}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), 
types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + + err = UninstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{validDirectory}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to NOT find the CRDs + + crds := []string{ + "foos.bar.example.com", + "bazs.qux.example.com", + "captains.crew.example.com", + "firstmates.crew.example.com", + "drivers.crew.example.com", + } + placeholder := &apiextensionsv1.CustomResourceDefinition{} + Eventually(func() bool { + for _, crd := range crds { + err = c.Get(context.TODO(), types.NamespacedName{Name: crd}, placeholder) + notFound := err != nil && apierrors.IsNotFound(err) + if !notFound { + return false + } + } + return true + }, 20).Should(BeTrue()) + }, 30) + }) + + Describe("Start", func() { + It("should raise an error on invalid dir when flag is enabled", func() { + env := &Environment{ErrorIfCRDPathMissing: true, CRDDirectoryPaths: []string{invalidDirectory}} + _, err := env.Start() + 
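The Start/Stop specs here cover the usual bootstrap wiring. A minimal sketch of that wiring as a consumer would write it; the manifest path is an assumed example, not part of this change:

package suite // hypothetical bootstrap snippet

import (
    "path/filepath"

    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
)

// startEnv points the environment at CRD manifests and fails fast when the
// path is missing, which is the behaviour exercised by the specs around here.
func startEnv() (*envtest.Environment, *rest.Config, error) {
    env := &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("config", "crd", "bases")},
        ErrorIfCRDPathMissing: true,
    }
    cfg, err := env.Start()
    if err != nil {
        return nil, nil, err
    }
    // Callers should defer env.Stop(); besides tearing down the control plane
    // it removes the temporary webhook serving-cert directory checked in the
    // Stop spec below.
    return env, cfg, nil
}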
Expect(err).To(HaveOccurred()) + Expect(env.Stop()).To(Succeed()) + }, 30) + + It("should not raise an error on invalid dir when flag is disabled", func() { + env := &Environment{ErrorIfCRDPathMissing: false, CRDDirectoryPaths: []string{invalidDirectory}} + _, err := env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(env.Stop()).To(Succeed()) + }, 30) + }) + + Describe("Stop", func() { + It("should cleanup webhook /tmp folder with no error when using existing cluster", func() { + env := &Environment{} + _, err := env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(env.Stop()).To(Succeed()) + + // check if the /tmp/envtest-serving-certs-* dir doesnt exists any more + Expect(env.WebhookInstallOptions.LocalServingCertDir).ShouldNot(BeADirectory()) + }, 30) + }) +}) diff --git a/pkg/envtest/ginkgo_test.go b/pkg/envtest/ginkgo_test.go new file mode 100644 index 0000000000..fba031c954 --- /dev/null +++ b/pkg/envtest/ginkgo_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +// NewlineReporter is Reporter that Prints a newline after the default Reporter output so that the results +// are correctly parsed by test automation. +// See issue https://github.com/jstemmer/go-junit-report/issues/31 +// It's re-exported here to avoid compatibility breakage/mass rewrites. +type NewlineReporter = printer.NewlineReporter diff --git a/pkg/envtest/helper.go b/pkg/envtest/helper.go new file mode 100644 index 0000000000..d3b52017d2 --- /dev/null +++ b/pkg/envtest/helper.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + crdScheme = scheme.Scheme +) + +// init is required to correctly initialize the crdScheme package variable. +func init() { + _ = apiextensionsv1.AddToScheme(crdScheme) +} + +// mergePaths merges two string slices containing paths. +// This function makes no guarantees about order of the merged slice. +func mergePaths(s1, s2 []string) []string { + m := make(map[string]struct{}) + for _, s := range s1 { + m[s] = struct{}{} + } + for _, s := range s2 { + m[s] = struct{}{} + } + merged := make([]string, len(m)) + i := 0 + for key := range m { + merged[i] = key + i++ + } + return merged +} + +// mergeCRDs merges two CRD slices using their names. 
+// This function makes no guarantees about order of the merged slice. +func mergeCRDs(s1, s2 []*apiextensionsv1.CustomResourceDefinition) []*apiextensionsv1.CustomResourceDefinition { + m := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, obj := range s1 { + m[obj.GetName()] = obj + } + for _, obj := range s2 { + m[obj.GetName()] = obj + } + merged := make([]*apiextensionsv1.CustomResourceDefinition, len(m)) + i := 0 + for _, obj := range m { + merged[i] = obj.DeepCopy() + i++ + } + return merged +} diff --git a/pkg/envtest/komega/OWNERS b/pkg/envtest/komega/OWNERS new file mode 100644 index 0000000000..ba347dae2b --- /dev/null +++ b/pkg/envtest/komega/OWNERS @@ -0,0 +1,14 @@ +approvers: + - controller-runtime-admins + - controller-runtime-maintainers + - controller-runtime-approvers + - schrej + - JoelSpeed + - sbueringer +reviewers: + - controller-runtime-admins + - controller-runtime-reviewers + - controller-runtime-approvers + - schrej + - JoelSpeed + - sbueringer diff --git a/pkg/envtest/komega/default.go b/pkg/envtest/komega/default.go new file mode 100644 index 0000000000..b243b922d5 --- /dev/null +++ b/pkg/envtest/komega/default.go @@ -0,0 +1,104 @@ +package komega + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// defaultK is the Komega used by the package global functions. +var defaultK = &komega{ctx: context.Background()} + +// SetClient sets the client used by the package global functions. +func SetClient(c client.Client) { + defaultK.client = c +} + +// SetContext sets the context used by the package global functions. +func SetContext(c context.Context) { + defaultK.ctx = c +} + +func checkDefaultClient() { + if defaultK.client == nil { + panic("Default Komega's client is not set. Use SetClient to set it.") + } +} + +// Get returns a function that fetches a resource and returns the occurring error. +// It can be used with gomega.Eventually() like this +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(komega.Get(&deployment)).To(gomega.Succeed()) +// +// By calling the returned function directly it can also be used with gomega.Expect(komega.Get(...)()).To(...) +func Get(obj client.Object) func() error { + checkDefaultClient() + return defaultK.Get(obj) +} + +// List returns a function that lists resources and returns the occurring error. +// It can be used with gomega.Eventually() like this +// +// deployments := v1.DeploymentList{ ... } +// gomega.Eventually(k.List(&deployments)).To(gomega.Succeed()) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.List(...)()).To(...) +func List(list client.ObjectList, opts ...client.ListOption) func() error { + checkDefaultClient() + return defaultK.List(list, opts...) +} + +// Update returns a function that fetches a resource, applies the provided update function and then updates the resource. +// It can be used with gomega.Eventually() like this: +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(k.Update(&deployment, func (o client.Object) { +// deployment.Spec.Replicas = 3 +// return &deployment +// })).To(gomega.Succeed()) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.Update(...)()).To(...) +func Update(obj client.Object, f func(), opts ...client.UpdateOption) func() error { + checkDefaultClient() + return defaultK.Update(obj, f, opts...) 
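A short, self-contained sketch of how the package-level helpers defined here read in practice; the fake client, object, and label values are illustrative, and the default_test.go file that follows exercises the same calls one by one:

package komega_sketch // hypothetical test file, not part of this change

import (
    "testing"

    . "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
    "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

func TestPackageLevelHelpers(t *testing.T) {
    g := NewWithT(t)

    deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
    // Register the client once; Get and Update below use it implicitly.
    komega.SetClient(fakeclient.NewClientBuilder().WithObjects(deploy).Build())

    // Re-fetch until the object is readable, then mutate it through Update.
    g.Eventually(komega.Get(deploy)).Should(Succeed())
    g.Eventually(komega.Update(deploy, func() {
        deploy.SetLabels(map[string]string{"team": "demo"})
    })).Should(Succeed())
}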
+} + +// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status. +// It can be used with gomega.Eventually() like this: +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(k.UpdateStatus(&deployment, func (o client.Object) { +// deployment.Status.AvailableReplicas = 1 +// return &deployment +// })).To(gomega.Succeed()) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.UpdateStatus(...)()).To(...) +func UpdateStatus(obj client.Object, f func(), opts ...client.UpdateOption) func() error { + checkDefaultClient() + return defaultK.UpdateStatus(obj, f, opts...) +} + +// Object returns a function that fetches a resource and returns the object. +// It can be used with gomega.Eventually() like this: +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(k.Object(&deployment)).To(HaveField("Spec.Replicas", gomega.Equal(pointer.Int32(3)))) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.Object(...)()).To(...) +func Object(obj client.Object) func() (client.Object, error) { + checkDefaultClient() + return defaultK.Object(obj) +} + +// ObjectList returns a function that fetches a resource and returns the object. +// It can be used with gomega.Eventually() like this: +// +// deployments := appsv1.DeploymentList{ ... } +// gomega.Eventually(k.ObjectList(&deployments)).To(HaveField("Items", HaveLen(1))) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.ObjectList(...)()).To(...) +func ObjectList(list client.ObjectList, opts ...client.ListOption) func() (client.ObjectList, error) { + checkDefaultClient() + return defaultK.ObjectList(list, opts...) +} diff --git a/pkg/envtest/komega/default_test.go b/pkg/envtest/komega/default_test.go new file mode 100644 index 0000000000..238a4abd9e --- /dev/null +++ b/pkg/envtest/komega/default_test.go @@ -0,0 +1,116 @@ +package komega + +import ( + "testing" + + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func TestDefaultGet(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(Get(&fetched)).Should(Succeed()) + + g.Expect(*fetched.Spec.Replicas).To(BeEquivalentTo(5)) +} + +func TestDefaultList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + list := appsv1.DeploymentList{} + g.Eventually(List(&list)).Should(Succeed()) + + g.Expect(list.Items).To(HaveLen(1)) + depl := exampleDeployment() + g.Expect(list.Items[0]).To(And( + HaveField("ObjectMeta.Name", Equal(depl.ObjectMeta.Name)), + HaveField("ObjectMeta.Namespace", Equal(depl.ObjectMeta.Namespace)), + )) +} + +func TestDefaultUpdate(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(Update(&updateDeployment, func() { + updateDeployment.Annotations = map[string]string{"updated": "true"} + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(Object(&fetched)()).To(HaveField("ObjectMeta.Annotations", HaveKeyWithValue("updated", "true"))) +} + +func TestDefaultUpdateStatus(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(UpdateStatus(&updateDeployment, func() { + updateDeployment.Status.AvailableReplicas = 1 + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(Object(&fetched)()).To(HaveField("Status.AvailableReplicas", BeEquivalentTo(1))) +} + +func TestDefaultObject(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(Object(&fetched)).Should(And( + Not(BeNil()), + HaveField("Spec.Replicas", Equal(pointer.Int32(5))), + )) +} + +func TestDefaultObjectList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + list := appsv1.DeploymentList{} + g.Eventually(ObjectList(&list)).Should(And( + Not(BeNil()), + HaveField("Items", And( + HaveLen(1), + ContainElement(HaveField("Spec.Replicas", Equal(pointer.Int32(5)))), + )), + )) +} diff --git a/pkg/envtest/komega/equalobject.go b/pkg/envtest/komega/equalobject.go new file mode 100644 index 0000000000..06fe68d571 --- /dev/null +++ b/pkg/envtest/komega/equalobject.go @@ -0,0 +1,298 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package komega + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// These package variables hold pre-created commonly used options that can be used to reduce the manual work involved in +// identifying the paths that need to be compared for testing equality between objects. +var ( + // IgnoreAutogeneratedMetadata contains the paths for all the metadata fields that are commonly set by the + // client and APIServer. This is used as a MatchOption for situations when only user-provided metadata is relevant. + IgnoreAutogeneratedMetadata = IgnorePaths{ + "metadata.uid", + "metadata.generation", + "metadata.creationTimestamp", + "metadata.resourceVersion", + "metadata.managedFields", + "metadata.deletionGracePeriodSeconds", + "metadata.deletionTimestamp", + "metadata.selfLink", + "metadata.generateName", + } +) + +type diffPath struct { + types []string + json []string +} + +// equalObjectMatcher is a Gomega matcher used to establish equality between two Kubernetes runtime.Objects. +type equalObjectMatcher struct { + // original holds the object that will be used to Match. + original runtime.Object + + // diffPaths contains the paths that differ between two objects. + diffPaths []diffPath + + // options holds the options that identify what should and should not be matched. + options *EqualObjectOptions +} + +// EqualObject returns a Matcher for the passed Kubernetes runtime.Object with the passed Options. This function can be +// used as a Gomega Matcher in Gomega Assertions. +func EqualObject(original runtime.Object, opts ...EqualObjectOption) types.GomegaMatcher { + matchOptions := &EqualObjectOptions{} + matchOptions = matchOptions.ApplyOptions(opts) + + return &equalObjectMatcher{ + options: matchOptions, + original: original, + } +} + +// Match compares the current object to the passed object and returns true if the objects are the same according to +// the Matcher and MatchOptions. +func (m *equalObjectMatcher) Match(actual interface{}) (success bool, err error) { + // Nil checks required first here for: + // 1) Nil equality which returns true + // 2) One object nil which returns an error + actualIsNil := reflect.ValueOf(actual).IsNil() + originalIsNil := reflect.ValueOf(m.original).IsNil() + + if actualIsNil && originalIsNil { + return true, nil + } + if actualIsNil || originalIsNil { + return false, fmt.Errorf("can not compare an object with a nil. original %v , actual %v", m.original, actual) + } + + m.diffPaths = m.calculateDiff(actual) + return len(m.diffPaths) == 0, nil +} + +// FailureMessage returns a message comparing the full objects after an unexpected failure to match has occurred. +func (m *equalObjectMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("the following fields were expected to match but did not:\n%v\n%s", m.diffPaths, + format.Message(actual, "expected to match", m.original)) +} + +// NegatedFailureMessage returns a string stating that all fields matched, even though that was not expected. 
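A minimal usage sketch of the matcher defined above, as it might appear in a test; the object and field values are illustrative, not part of this change:

package komega_sketch // hypothetical test file

import (
    "testing"

    . "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

func TestRoundTripIgnoringServerFields(t *testing.T) {
    g := NewWithT(t)

    want := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}

    // Pretend this copy came back from the API server, which filled in metadata.
    got := want.DeepCopy()
    got.ResourceVersion = "42"
    got.UID = "0000-aaaa"

    // Differences under the autogenerated metadata paths are ignored, so the
    // round-tripped object still counts as equal to what we submitted.
    g.Expect(got).To(komega.EqualObject(want, komega.IgnoreAutogeneratedMetadata))
}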
+func (m *equalObjectMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return "it was expected that some fields do not match, but all of them did" +} + +func (d diffPath) String() string { + return fmt.Sprintf("(%s/%s)", strings.Join(d.types, "."), strings.Join(d.json, ".")) +} + +// diffReporter is a custom recorder for cmp.Diff which records all paths that are +// different between two objects. +type diffReporter struct { + stack []cmp.PathStep + + diffPaths []diffPath +} + +func (r *diffReporter) PushStep(s cmp.PathStep) { + r.stack = append(r.stack, s) +} + +func (r *diffReporter) Report(res cmp.Result) { + if !res.Equal() { + r.diffPaths = append(r.diffPaths, r.currentPath()) + } +} + +// currentPath converts the current stack into string representations that match +// the IgnorePaths and MatchPaths syntax. +func (r *diffReporter) currentPath() diffPath { + p := diffPath{types: []string{""}, json: []string{""}} + for si, s := range r.stack[1:] { + switch s := s.(type) { + case cmp.StructField: + p.types = append(p.types, s.String()[1:]) + // fetch the type information from the parent struct. + // Note: si has an offset of 1 compared to r.stack as we loop over r.stack[1:], so we don't need -1 + field := r.stack[si].Type().Field(s.Index()) + p.json = append(p.json, strings.Split(field.Tag.Get("json"), ",")[0]) + case cmp.SliceIndex: + key := fmt.Sprintf("[%d]", s.Key()) + p.types[len(p.types)-1] += key + p.json[len(p.json)-1] += key + case cmp.MapIndex: + key := fmt.Sprintf("%v", s.Key()) + if strings.ContainsAny(key, ".[]/\\") { + key = fmt.Sprintf("[%s]", key) + p.types[len(p.types)-1] += key + p.json[len(p.json)-1] += key + } else { + p.types = append(p.types, key) + p.json = append(p.json, key) + } + } + } + // Empty strings were added as the first element. If they're still empty, remove them again. + if len(p.json) > 0 && len(p.json[0]) == 0 { + p.json = p.json[1:] + p.types = p.types[1:] + } + return p +} + +func (r *diffReporter) PopStep() { + r.stack = r.stack[:len(r.stack)-1] +} + +// calculateDiff calculates the difference between two objects and returns the +// paths of the fields that do not match. +func (m *equalObjectMatcher) calculateDiff(actual interface{}) []diffPath { + var original interface{} = m.original + // Remove the wrapping Object from unstructured.Unstructured to make comparison behave similar to + // regular objects. + if u, isUnstructured := actual.(*unstructured.Unstructured); isUnstructured { + actual = u.Object + } + if u, ok := m.original.(*unstructured.Unstructured); ok { + original = u.Object + } + r := diffReporter{} + cmp.Diff(original, actual, cmp.Reporter(&r)) + return filterDiffPaths(*m.options, r.diffPaths) +} + +// filterDiffPaths filters the diff paths using the paths in EqualObjectOptions. +func filterDiffPaths(opts EqualObjectOptions, paths []diffPath) []diffPath { + result := []diffPath{} + + for _, p := range paths { + if len(opts.matchPaths) > 0 && !hasAnyPathPrefix(p, opts.matchPaths) { + continue + } + if hasAnyPathPrefix(p, opts.ignorePaths) { + continue + } + + result = append(result, p) + } + + return result +} + +// hasPathPrefix compares the segments of a path. 
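The reporter above is a go-cmp Reporter hook that collects the path of every unequal leaf. A stripped-down, self-contained illustration of the same pattern, assuming only the public go-cmp API; the types and printed output are illustrative:

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

// pathCollector is a reduced variant of the diffReporter above: it records the
// string form of every path whose values differ.
type pathCollector struct {
    path  cmp.Path
    diffs []string
}

func (c *pathCollector) PushStep(s cmp.PathStep) { c.path = append(c.path, s) }
func (c *pathCollector) PopStep()                { c.path = c.path[:len(c.path)-1] }
func (c *pathCollector) Report(r cmp.Result) {
    if !r.Equal() {
        c.diffs = append(c.diffs, c.path.String())
    }
}

func main() {
    type deploySpec struct{ Replicas int }

    var c pathCollector
    cmp.Diff(deploySpec{Replicas: 1}, deploySpec{Replicas: 2}, cmp.Reporter(&c))

    fmt.Println(c.diffs) // prints the recorded path(s), e.g. [.Replicas]
}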
+func hasPathPrefix(path []string, prefix []string) bool { + for i, p := range prefix { + if i >= len(path) { + return false + } + // return false if a segment doesn't match + if path[i] != p && (i < len(prefix)-1 || !segmentHasPrefix(path[i], p)) { + return false + } + } + return true +} + +func segmentHasPrefix(s, prefix string) bool { + return len(s) >= len(prefix) && s[0:len(prefix)] == prefix && + // if it is a prefix match, make sure the next character is a [ for array/map access + (len(s) == len(prefix) || s[len(prefix)] == '[') +} + +// hasAnyPathPrefix returns true if path matches any of the path prefixes. +// It respects the name boundaries within paths, so 'ObjectMeta.Name' does not +// match 'ObjectMeta.Namespace' for example. +func hasAnyPathPrefix(path diffPath, prefixes [][]string) bool { + for _, prefix := range prefixes { + if hasPathPrefix(path.types, prefix) || hasPathPrefix(path.json, prefix) { + return true + } + } + return false +} + +// EqualObjectOption describes an Option that can be applied to a Matcher. +type EqualObjectOption interface { + // ApplyToEqualObjectMatcher applies this configuration to the given MatchOption. + ApplyToEqualObjectMatcher(options *EqualObjectOptions) +} + +// EqualObjectOptions holds the available types of EqualObjectOptions that can be applied to a Matcher. +type EqualObjectOptions struct { + ignorePaths [][]string + matchPaths [][]string +} + +// ApplyOptions adds the passed MatchOptions to the MatchOptions struct. +func (o *EqualObjectOptions) ApplyOptions(opts []EqualObjectOption) *EqualObjectOptions { + for _, opt := range opts { + opt.ApplyToEqualObjectMatcher(o) + } + return o +} + +// IgnorePaths instructs the Matcher to ignore given paths when computing a diff. +// Paths are written in a syntax similar to Go with a few special cases. Both types and +// json/yaml field names are supported. +// +// Regular Paths: +// * "ObjectMeta.Name" +// * "metadata.name" +// Arrays: +// * "metadata.ownerReferences[0].name" +// Maps, if they do not contain any of .[]/\: +// * "metadata.labels.something" +// Maps, if they contain any of .[]/\: +// * "metadata.labels[kubernetes.io/something]" +type IgnorePaths []string + +// ApplyToEqualObjectMatcher applies this configuration to the given MatchOptions. +func (i IgnorePaths) ApplyToEqualObjectMatcher(opts *EqualObjectOptions) { + for _, p := range i { + opts.ignorePaths = append(opts.ignorePaths, strings.Split(p, ".")) + } +} + +// MatchPaths instructs the Matcher to restrict its diff to the given paths. If empty the Matcher will look at all paths. +// Paths are written in a syntax similar to Go with a few special cases. Both types and +// json/yaml field names are supported. +// +// Regular Paths: +// * "ObjectMeta.Name" +// * "metadata.name" +// Arrays: +// * "metadata.ownerReferences[0].name" +// Maps, if they do not contain any of .[]/\: +// * "metadata.labels.something" +// Maps, if they contain any of .[]/\: +// * "metadata.labels[kubernetes.io/something]" +type MatchPaths []string + +// ApplyToEqualObjectMatcher applies this configuration to the given MatchOptions. 
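A small sketch combining the two options; the values are illustrative. Either the Go-type or the json spelling of a path may be used, and MatchPaths is the allow-list counterpart of IgnorePaths:

package komega_sketch // hypothetical test file, not part of this change

import (
    "testing"

    . "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

func TestPathOptions(t *testing.T) {
    g := NewWithT(t)

    original := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{
        Name:   "demo",
        Labels: map[string]string{"team": "a"},
    }}
    modified := original.DeepCopy()
    modified.Labels["team"] = "b"

    // The same difference can be suppressed via its Go-type path ...
    g.Expect(modified).To(komega.EqualObject(original, komega.IgnorePaths{"ObjectMeta.Labels.team"}))
    // ... or via its json path.
    g.Expect(modified).To(komega.EqualObject(original, komega.IgnorePaths{"metadata.labels.team"}))

    // MatchPaths restricts the comparison to the listed paths, so the label
    // drift above is simply not looked at here.
    g.Expect(modified).To(komega.EqualObject(original, komega.MatchPaths{"metadata.name"}))
}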
+func (i MatchPaths) ApplyToEqualObjectMatcher(opts *EqualObjectOptions) { + for _, p := range i { + opts.matchPaths = append(opts.ignorePaths, strings.Split(p, ".")) + } +} diff --git a/pkg/envtest/komega/equalobject_test.go b/pkg/envtest/komega/equalobject_test.go new file mode 100644 index 0000000000..9fe10d1779 --- /dev/null +++ b/pkg/envtest/komega/equalobject_test.go @@ -0,0 +1,662 @@ +package komega + +import ( + "testing" + + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestEqualObjectMatcher(t *testing.T) { + cases := []struct { + name string + original client.Object + modified client.Object + options []EqualObjectOption + want bool + }{ + { + name: "succeed with equal objects", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + want: true, + }, + { + name: "fail with non equal objects", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "somethingelse", + }, + }, + want: false, + }, + { + name: "succeeds if ignored fields do not match", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{"somelabel": "somevalue"}, + OwnerReferences: []metav1.OwnerReference{{ + Name: "controller", + }}, + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "somethingelse", + Labels: map[string]string{"somelabel": "anothervalue"}, + OwnerReferences: []metav1.OwnerReference{{ + Name: "another", + }}, + }, + }, + want: true, + options: []EqualObjectOption{ + IgnorePaths{ + "ObjectMeta.Name", + "ObjectMeta.CreationTimestamp", + "ObjectMeta.Labels.somelabel", + "ObjectMeta.OwnerReferences[0].Name", + "Spec.Template.ObjectMeta", + }, + }, + }, + { + name: "succeeds if ignored fields in json notation do not match", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{"somelabel": "somevalue"}, + OwnerReferences: []metav1.OwnerReference{{ + Name: "controller", + }}, + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "somethingelse", + Labels: map[string]string{"somelabel": "anothervalue"}, + OwnerReferences: []metav1.OwnerReference{{ + Name: "another", + }}, + }, + }, + want: true, + options: []EqualObjectOption{ + IgnorePaths{ + "metadata.name", + "metadata.creationTimestamp", + "metadata.labels.somelabel", + "metadata.ownerReferences[0].name", + "spec.template.metadata", + }, + }, + }, + { + name: "succeeds if all allowed fields match, and some others do not", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "special", + }, + }, + want: true, + options: []EqualObjectOption{ + MatchPaths{ + "ObjectMeta.Name", + }, + }, + }, + { + name: "works with unstructured.Unstructured", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "something", + "namespace": "test", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": 
"somethingelse", + "namespace": "test", + }, + }, + }, + want: true, + options: []EqualObjectOption{ + IgnorePaths{ + "metadata.name", + }, + }, + }, + + // Test when objects are equal. + { + name: "Equal field (spec) both in original and in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: true, + }, + + { + name: "Equal nested field both in original and in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + want: true, + }, + + // Test when there is a difference between the objects. + { + name: "Unequal field both in original and in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar-changed", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: false, + }, + { + name: "Unequal nested field both in original and modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A-Changed", + }, + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + want: false, + }, + + { + name: "Value of type map with different values", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "map": map[string]string{ + "A": "A-changed", + "B": "B", + // C missing + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "map": map[string]string{ + "A": "A", + // B missing + "C": "C", + }, + }, + }, + }, + want: false, + }, + + { + name: "Value of type Array or Slice with same length but different values", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "slice": []string{ + "D", + "C", + "B", + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "slice": []string{ + "A", + "B", + "C", + }, + }, + }, + }, + want: false, + }, + + // This tests specific behaviour in how Kubernetes marshals the zero value of metav1.Time{}. 
+ { + name: "Creation timestamp set to empty value on both original and modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": metav1.Time{}, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": metav1.Time{}, + }, + }, + }, + want: true, + }, + + // Cases to test diff when fields exist only in modified object. + { + name: "Field only in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: false, + }, + { + name: "Nested field only in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + want: false, + }, + { + name: "Creation timestamp exists on modified but not on original", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": "2021-11-03T11:05:17Z", + }, + }, + }, + want: false, + }, + + // Test when fields exists only in the original object. + { + name: "Field only in original", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + want: false, + }, + { + name: "Nested field only in original", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + want: false, + }, + { + name: "Creation timestamp exists on original but not on modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": "2021-11-03T11:05:17Z", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + + want: false, + }, + + // Test metadata fields computed by the system or in status are compared. 
+ { + name: "Unequal Metadata fields computed by the system or in status", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "selfLink": "foo", + "uid": "foo", + "resourceVersion": "foo", + "generation": "foo", + "managedFields": "foo", + }, + "status": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: false, + }, + { + name: "Unequal labels and annotations", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + "foo": "bar", + }, + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + want: false, + }, + + // Ignore fields MatchOption + { + name: "Unequal metadata fields ignored by IgnorePaths MatchOption", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + "selfLink": "foo", + "uid": "foo", + "resourceVersion": "foo", + "generation": "foo", + "managedFields": "foo", + }, + }, + }, + options: []EqualObjectOption{IgnoreAutogeneratedMetadata}, + want: true, + }, + { + name: "Unequal labels and annotations ignored by IgnorePaths MatchOption", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + "labels": map[string]interface{}{ + "foo": "bar", + }, + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + options: []EqualObjectOption{IgnorePaths{"metadata.labels", "metadata.annotations"}}, + want: true, + }, + { + name: "Ignore fields are not compared", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{}, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "", + "port": 0, + }, + }, + }, + }, + options: []EqualObjectOption{IgnorePaths{"spec.controlPlaneEndpoint"}}, + want: true, + }, + { + name: "Not-ignored fields are still compared", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{}, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "ignored": "somevalue", + "superflous": "shouldcausefailure", + }, + }, + }, + }, + options: []EqualObjectOption{IgnorePaths{"metadata.annotations.ignored"}}, + want: false, + }, + + // MatchPaths MatchOption + { + name: "Unequal metadata fields not compared by setting MatchPaths MatchOption", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "uid": "foo", + }, + }, + }, + options: 
[]EqualObjectOption{MatchPaths{"spec"}}, + want: true, + }, + + // More tests + { + name: "No changes", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + "B": "B", + "C": "C", // C only in original + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + "B": "B", + }, + }, + }, + want: false, + }, + { + name: "Many changes", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + // B missing + "C": "C", // C only in original + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + "B": "B", + }, + }, + }, + want: false, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + g := NewWithT(t) + m := EqualObject(c.original, c.options...) + success, _ := m.Match(c.modified) + if !success { + t.Log(m.FailureMessage(c.modified)) + } + g.Expect(success).To(Equal(c.want)) + }) + } +} diff --git a/pkg/envtest/komega/interfaces.go b/pkg/envtest/komega/interfaces.go new file mode 100644 index 0000000000..0ec3fe0236 --- /dev/null +++ b/pkg/envtest/komega/interfaces.go @@ -0,0 +1,78 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package komega + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Komega is a collection of utilites for writing tests involving a mocked +// Kubernetes API. +type Komega interface { + // Get returns a function that fetches a resource and returns the occurring error. + // It can be used with gomega.Eventually() like this + // deployment := appsv1.Deployment{ ... } + // gomega.Eventually(k.Get(&deployment)).To(gomega.Succeed()) + // By calling the returned function directly it can also be used with gomega.Expect(k.Get(...)()).To(...) + Get(client.Object) func() error + + // List returns a function that lists resources and returns the occurring error. + // It can be used with gomega.Eventually() like this + // deployments := v1.DeploymentList{ ... } + // gomega.Eventually(k.List(&deployments)).To(gomega.Succeed()) + // By calling the returned function directly it can also be used as gomega.Expect(k.List(...)()).To(...) + List(client.ObjectList, ...client.ListOption) func() error + + // Update returns a function that fetches a resource, applies the provided update function and then updates the resource. + // It can be used with gomega.Eventually() like this: + // deployment := appsv1.Deployment{ ... } + // gomega.Eventually(k.Update(&deployment, func (o client.Object) { + // deployment.Spec.Replicas = 3 + // return &deployment + // })).To(gomega.Succeed()) + // By calling the returned function directly it can also be used as gomega.Expect(k.Update(...)()).To(...) 
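A sketch of the non-global flavour this interface describes, with a Komega built from a client and bound to a context; the fake client and object names are illustrative, not part of this change:

package komega_sketch // hypothetical test file

import (
    "context"
    "testing"

    . "github.com/onsi/gomega"
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
    "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

func TestInterfaceFlavour(t *testing.T) {
    g := NewWithT(t)

    deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
    c := fakeclient.NewClientBuilder().WithObjects(deploy).Build()

    // Each Komega carries its own client and context, so helpers can be
    // composed without touching the package-level default.
    k := komega.New(c).WithContext(context.Background())

    g.Eventually(k.Object(deploy)).Should(
        HaveField("ObjectMeta.Name", Equal("demo")),
    )
}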
+ Update(client.Object, func(), ...client.UpdateOption) func() error + + // UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status. + // It can be used with gomega.Eventually() like this: + // deployment := appsv1.Deployment{ ... } + // gomega.Eventually(k.Update(&deployment, func (o client.Object) { + // deployment.Status.AvailableReplicas = 1 + // return &deployment + // })).To(gomega.Succeed()) + // By calling the returned function directly it can also be used as gomega.Expect(k.UpdateStatus(...)()).To(...) + UpdateStatus(client.Object, func(), ...client.UpdateOption) func() error + + // Object returns a function that fetches a resource and returns the object. + // It can be used with gomega.Eventually() like this: + // deployment := appsv1.Deployment{ ... } + // gomega.Eventually(k.Object(&deployment)).To(HaveField("Spec.Replicas", gomega.Equal(pointer.Int32(3)))) + // By calling the returned function directly it can also be used as gomega.Expect(k.Object(...)()).To(...) + Object(client.Object) func() (client.Object, error) + + // ObjectList returns a function that fetches a resource and returns the object. + // It can be used with gomega.Eventually() like this: + // deployments := appsv1.DeploymentList{ ... } + // gomega.Eventually(k.ObjectList(&deployments)).To(HaveField("Items", HaveLen(1))) + // By calling the returned function directly it can also be used as gomega.Expect(k.ObjectList(...)()).To(...) + ObjectList(client.ObjectList, ...client.ListOption) func() (client.ObjectList, error) + + // WithContext returns a copy that uses the given context. + WithContext(context.Context) Komega +} diff --git a/pkg/envtest/komega/komega.go b/pkg/envtest/komega/komega.go new file mode 100644 index 0000000000..56748cb923 --- /dev/null +++ b/pkg/envtest/komega/komega.go @@ -0,0 +1,117 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package komega + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// komega is a collection of utilites for writing tests involving a mocked +// Kubernetes API. +type komega struct { + ctx context.Context + client client.Client +} + +var _ Komega = &komega{} + +// New creates a new Komega instance with the given client. +func New(c client.Client) Komega { + return &komega{ + client: c, + ctx: context.Background(), + } +} + +// WithContext returns a copy that uses the given context. +func (k komega) WithContext(ctx context.Context) Komega { + k.ctx = ctx + return &k +} + +// Get returns a function that fetches a resource and returns the occurring error. +func (k *komega) Get(obj client.Object) func() error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() error { + return k.client.Get(k.ctx, key, obj) + } +} + +// List returns a function that lists resources and returns the occurring error. 
+func (k *komega) List(obj client.ObjectList, opts ...client.ListOption) func() error { + return func() error { + return k.client.List(k.ctx, obj, opts...) + } +} + +// Update returns a function that fetches a resource, applies the provided update function and then updates the resource. +func (k *komega) Update(obj client.Object, updateFunc func(), opts ...client.UpdateOption) func() error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() error { + err := k.client.Get(k.ctx, key, obj) + if err != nil { + return err + } + updateFunc() + return k.client.Update(k.ctx, obj, opts...) + } +} + +// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status. +func (k *komega) UpdateStatus(obj client.Object, updateFunc func(), opts ...client.UpdateOption) func() error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() error { + err := k.client.Get(k.ctx, key, obj) + if err != nil { + return err + } + updateFunc() + return k.client.Status().Update(k.ctx, obj, opts...) + } +} + +// Object returns a function that fetches a resource and returns the object. +func (k *komega) Object(obj client.Object) func() (client.Object, error) { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() (client.Object, error) { + err := k.client.Get(k.ctx, key, obj) + return obj, err + } +} + +// ObjectList returns a function that fetches a resource and returns the object. +func (k *komega) ObjectList(obj client.ObjectList, opts ...client.ListOption) func() (client.ObjectList, error) { + return func() (client.ObjectList, error) { + err := k.client.List(k.ctx, obj, opts...) + return obj, err + } +} diff --git a/pkg/envtest/komega/komega_test.go b/pkg/envtest/komega/komega_test.go new file mode 100644 index 0000000000..3a00be6441 --- /dev/null +++ b/pkg/envtest/komega/komega_test.go @@ -0,0 +1,137 @@ +package komega + +import ( + "testing" + + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func exampleDeployment() *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32(5), + }, + } +} + +func createFakeClient() client.Client { + return fakeclient.NewClientBuilder(). + WithObjects(exampleDeployment()). 
+ Build() +} + +func TestGet(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(k.Get(&fetched)).Should(Succeed()) + + g.Expect(*fetched.Spec.Replicas).To(BeEquivalentTo(5)) +} + +func TestList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + list := appsv1.DeploymentList{} + g.Eventually(k.List(&list)).Should(Succeed()) + + g.Expect(list.Items).To(HaveLen(1)) + depl := exampleDeployment() + g.Expect(list.Items[0]).To(And( + HaveField("ObjectMeta.Name", Equal(depl.ObjectMeta.Name)), + HaveField("ObjectMeta.Namespace", Equal(depl.ObjectMeta.Namespace)), + )) +} + +func TestUpdate(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(k.Update(&updateDeployment, func() { + updateDeployment.Annotations = map[string]string{"updated": "true"} + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(k.Object(&fetched)()).To(HaveField("ObjectMeta.Annotations", HaveKeyWithValue("updated", "true"))) +} + +func TestUpdateStatus(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(k.UpdateStatus(&updateDeployment, func() { + updateDeployment.Status.AvailableReplicas = 1 + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(k.Object(&fetched)()).To(HaveField("Status.AvailableReplicas", BeEquivalentTo(1))) +} + +func TestObject(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(k.Object(&fetched)).Should(And( + Not(BeNil()), + HaveField("Spec.Replicas", Equal(pointer.Int32(5))), + )) +} + +func TestObjectList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + list := appsv1.DeploymentList{} + g.Eventually(k.ObjectList(&list)).Should(And( + Not(BeNil()), + HaveField("Items", And( + HaveLen(1), + ContainElement(HaveField("Spec.Replicas", Equal(pointer.Int32(5)))), + )), + )) +} diff --git a/pkg/envtest/printer/ginkgo.go b/pkg/envtest/printer/ginkgo.go new file mode 100644 index 0000000000..d835dc7721 --- /dev/null +++ b/pkg/envtest/printer/ginkgo.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package printer contains setup for a friendlier Ginkgo printer that's easier +// to parse by test automation. 
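+//
+// An illustrative suite registration, assuming the usual ginkgo/gomega dot imports
+// (the suite and test names are placeholders):
+//
+//	func TestAPIs(t *testing.T) {
+//		RegisterFailHandler(Fail)
+//		RunSpecsWithDefaultAndCustomReporters(t, "Controller Suite",
+//			[]Reporter{printer.NewlineReporter{}})
+//	}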
+package printer + +import ( + "fmt" + + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +var _ ginkgo.Reporter = NewlineReporter{} + +// NewlineReporter is Reporter that Prints a newline after the default Reporter output so that the results +// are correctly parsed by test automation. +// See issue https://github.com/jstemmer/go-junit-report/issues/31 +type NewlineReporter struct{} + +// SpecSuiteWillBegin implements ginkgo.Reporter. +func (NewlineReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { +} + +// BeforeSuiteDidRun implements ginkgo.Reporter. +func (NewlineReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {} + +// AfterSuiteDidRun implements ginkgo.Reporter. +func (NewlineReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {} + +// SpecWillRun implements ginkgo.Reporter. +func (NewlineReporter) SpecWillRun(specSummary *types.SpecSummary) {} + +// SpecDidComplete implements ginkgo.Reporter. +func (NewlineReporter) SpecDidComplete(specSummary *types.SpecSummary) {} + +// SpecSuiteDidEnd Prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:". +func (NewlineReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { fmt.Printf("\n") } diff --git a/pkg/envtest/printer/prow.go b/pkg/envtest/printer/prow.go new file mode 100644 index 0000000000..2f4009aa03 --- /dev/null +++ b/pkg/envtest/printer/prow.go @@ -0,0 +1,109 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printer + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + allRegisteredSuites = sets.String{} + allRegisteredSuitesLock = &sync.Mutex{} +) + +type prowReporter struct { + junitReporter *reporters.JUnitReporter +} + +// NewProwReporter returns a prowReporter that will write out junit if running in Prow and do +// nothing otherwise. +// WARNING: It seems this does not always properly fail the test runs when there are failures, +// see https://github.com/onsi/ginkgo/issues/706 +// When using this you must make sure to grep for failures in your junit xmls and fail the run +// if there are any. 
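+// An illustrative registration alongside the newline reporter (the suite name is a placeholder):
+//
+//	RunSpecsWithDefaultAndCustomReporters(t, "Webhook Suite",
+//		[]Reporter{printer.NewlineReporter{}, printer.NewProwReporter("webhook")})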
+func NewProwReporter(suiteName string) ginkgo.Reporter { + allRegisteredSuitesLock.Lock() + if allRegisteredSuites.Has(suiteName) { + panic(fmt.Sprintf("Suite named %q registered more than once", suiteName)) + } + allRegisteredSuites.Insert(suiteName) + allRegisteredSuitesLock.Unlock() + + if os.Getenv("CI") == "" { + return &prowReporter{} + } + artifactsDir := os.Getenv("ARTIFACTS") + if artifactsDir == "" { + return &prowReporter{} + } + + path := filepath.Join(artifactsDir, fmt.Sprintf("junit_%s_%d.xml", suiteName, config.GinkgoConfig.ParallelNode)) + return &prowReporter{ + junitReporter: reporters.NewJUnitReporter(path), + } +} + +func (pr *prowReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + if pr.junitReporter != nil { + pr.junitReporter.SpecSuiteWillBegin(config, summary) + } +} + +// BeforeSuiteDidRun implements ginkgo.Reporter. +func (pr *prowReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + if pr.junitReporter != nil { + pr.junitReporter.BeforeSuiteDidRun(setupSummary) + } +} + +// AfterSuiteDidRun implements ginkgo.Reporter. +func (pr *prowReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + if pr.junitReporter != nil { + pr.junitReporter.AfterSuiteDidRun(setupSummary) + } +} + +// SpecWillRun implements ginkgo.Reporter. +func (pr *prowReporter) SpecWillRun(specSummary *types.SpecSummary) { + if pr.junitReporter != nil { + pr.junitReporter.SpecWillRun(specSummary) + } +} + +// SpecDidComplete implements ginkgo.Reporter. +func (pr *prowReporter) SpecDidComplete(specSummary *types.SpecSummary) { + if pr.junitReporter != nil { + pr.junitReporter.SpecDidComplete(specSummary) + } +} + +// SpecSuiteDidEnd Prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:". +func (pr *prowReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + if pr.junitReporter != nil { + pr.junitReporter.SpecSuiteDidEnd(summary) + } +} diff --git a/pkg/envtest/server.go b/pkg/envtest/server.go new file mode 100644 index 0000000000..f9e0bb8aba --- /dev/null +++ b/pkg/envtest/server.go @@ -0,0 +1,374 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envtest + +import ( + "fmt" + "os" + "strings" + "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var log = logf.RuntimeLog.WithName("test-env") + +/* +It's possible to override some defaults, by setting the following environment variables: +* USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster +* TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use +* TEST_ASSET_ETCD (string): path to the etcd binary to use +* TEST_ASSET_KUBECTL (string): path to the kubectl binary to use +* KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin. +* KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. +* KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. +* KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr +*/ +const ( + envUseExistingCluster = "USE_EXISTING_CLUSTER" + envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT" + envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT" + envAttachOutput = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT" + StartTimeout = 60 + StopTimeout = 60 + + defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second + defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second +) + +// internal types we expose as part of our public API. +type ( + // ControlPlane is the re-exported ControlPlane type from the internal testing package. + ControlPlane = controlplane.ControlPlane + + // APIServer is the re-exported APIServer from the internal testing package. + APIServer = controlplane.APIServer + + // Etcd is the re-exported Etcd from the internal testing package. + Etcd = controlplane.Etcd + + // User represents a Kubernetes user to provision for auth purposes. + User = controlplane.User + + // AuthenticatedUser represets a Kubernetes user that's been provisioned. + AuthenticatedUser = controlplane.AuthenticatedUser + + // ListenAddr indicates the address and port that the API server should listen on. + ListenAddr = process.ListenAddr + + // SecureServing contains details describing how the API server should serve + // its secure endpoint. + SecureServing = controlplane.SecureServing + + // Authn is an authentication method that can be used with the control plane to + // provision users. + Authn = controlplane.Authn + + // Arguments allows configuring a process's flags. + Arguments = process.Arguments + + // Arg is a single flag with one or more values. + Arg = process.Arg +) + +var ( + // EmptyArguments constructs a new set of flags with nothing set. + // + // This is mostly useful for testing helper methods -- you'll want to call + // Configure on the APIServer (or etcd) to configure their arguments. 
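+	//
+	// An illustrative sketch (the flag name is a placeholder):
+	//
+	//	args := EmptyArguments().
+	//		Append("feature-gates", "AllAlpha=true")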
+ EmptyArguments = process.EmptyArguments +) + +// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and +// install extension APIs. +type Environment struct { + // ControlPlane is the ControlPlane including the apiserver and etcd + ControlPlane controlplane.ControlPlane + + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Config can be used to talk to the apiserver. It's automatically + // populated if not set using the standard controller-runtime config + // loading. + Config *rest.Config + + // CRDInstallOptions are the options for installing CRDs. + CRDInstallOptions CRDInstallOptions + + // WebhookInstallOptions are the options for installing webhooks. + WebhookInstallOptions WebhookInstallOptions + + // ErrorIfCRDPathMissing provides an interface for the underlying + // CRDInstallOptions.ErrorIfPathMissing. It prevents silent failures + // for missing CRD paths. + ErrorIfCRDPathMissing bool + + // CRDs is a list of CRDs to install. + // If both this field and CRDs field in CRDInstallOptions are specified, the + // values are merged. + CRDs []*apiextensionsv1.CustomResourceDefinition + + // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. + // If both this field and Paths field in CRDInstallOptions are specified, the + // values are merged. + CRDDirectoryPaths []string + + // BinaryAssetsDirectory is the path where the binaries required for the envtest are + // located in the local environment. This field can be overridden by setting KUBEBUILDER_ASSETS. + BinaryAssetsDirectory string + + // UseExistingCluster indicates that this environments should use an + // existing kubeconfig, instead of trying to stand up a new control plane. + // This is useful in cases that need aggregated API servers and the like. + UseExistingCluster *bool + + // ControlPlaneStartTimeout is the maximum duration each controlplane component + // may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStartTimeout time.Duration + + // ControlPlaneStopTimeout is the maximum duration each controlplane component + // may take to stop. It defaults to the KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStopTimeout time.Duration + + // KubeAPIServerFlags is the set of flags passed while starting the api server. + // + // Deprecated: use ControlPlane.GetAPIServer().Configure() instead. + KubeAPIServerFlags []string + + // AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr. + // Enable this to get more visibility of the testing control plane. + // It respect KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable. + AttachControlPlaneOutput bool +} + +// Stop stops a running server. +// Previously installed CRDs, as listed in CRDInstallOptions.CRDs, will be uninstalled +// if CRDInstallOptions.CleanUpAfterUse are set to true. 
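+// An illustrative lifecycle, assuming CRD manifests live under a local testdata/crds directory:
+//
+//	testEnv := &Environment{
+//		CRDDirectoryPaths:     []string{filepath.Join("testdata", "crds")},
+//		ErrorIfCRDPathMissing: true,
+//	}
+//	cfg, err := testEnv.Start()
+//	// ... run tests against cfg ...
+//	err = testEnv.Stop()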
+func (te *Environment) Stop() error { + if te.CRDInstallOptions.CleanUpAfterUse { + if err := UninstallCRDs(te.Config, te.CRDInstallOptions); err != nil { + return err + } + } + + if err := te.WebhookInstallOptions.Cleanup(); err != nil { + return err + } + + if te.useExistingCluster() { + return nil + } + + return te.ControlPlane.Stop() +} + +// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on. +func (te *Environment) Start() (*rest.Config, error) { + if te.useExistingCluster() { + log.V(1).Info("using existing cluster") + if te.Config == nil { + // we want to allow people to pass in their own config, so + // only load a config if it hasn't already been set. + log.V(1).Info("automatically acquiring client configuration") + + var err error + te.Config, err = config.GetConfig() + if err != nil { + return nil, fmt.Errorf("unable to get configuration for existing cluster: %w", err) + } + } + } else { + apiServer := te.ControlPlane.GetAPIServer() + if len(apiServer.Args) == 0 { //nolint:staticcheck + // pass these through separately from above in case something like + // AddUser defaults APIServer. + // + // TODO(directxman12): if/when we feel like making a bigger + // breaking change here, just make APIServer and Etcd non-pointers + // in ControlPlane. + + // NB(directxman12): we still pass these in so that things work if the + // user manually specifies them, but in most cases we expect them to + // be nil so that we use the new .Configure() logic. + apiServer.Args = te.KubeAPIServerFlags //nolint:staticcheck + } + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } + + if os.Getenv(envAttachOutput) == "true" { + te.AttachControlPlaneOutput = true + } + if apiServer.Out == nil && te.AttachControlPlaneOutput { + apiServer.Out = os.Stdout + } + if apiServer.Err == nil && te.AttachControlPlaneOutput { + apiServer.Err = os.Stderr + } + if te.ControlPlane.Etcd.Out == nil && te.AttachControlPlaneOutput { + te.ControlPlane.Etcd.Out = os.Stdout + } + if te.ControlPlane.Etcd.Err == nil && te.AttachControlPlaneOutput { + te.ControlPlane.Etcd.Err = os.Stderr + } + + apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) + te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) + te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + + if err := te.defaultTimeouts(); err != nil { + return nil, fmt.Errorf("failed to default controlplane timeouts: %w", err) + } + te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout + te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout + apiServer.StartTimeout = te.ControlPlaneStartTimeout + apiServer.StopTimeout = te.ControlPlaneStopTimeout + + log.V(1).Info("starting control plane") + if err := te.startControlPlane(); err != nil { + return nil, fmt.Errorf("unable to start control plane itself: %w", err) + } + + // Create the *rest.Config for creating new clients + baseConfig := &rest.Config{ + // gotta go fast during tests -- we don't really care about overwhelming our test API server + QPS: 1000.0, + Burst: 2000.0, + } + + adminInfo := User{Name: "admin", Groups: []string{"system:masters"}} + adminUser, err := te.ControlPlane.AddUser(adminInfo, baseConfig) + if err != nil { + return te.Config, fmt.Errorf("unable to provision admin user: %w", err) + } + te.Config = adminUser.Config() + } + + // Set the default scheme if nil. 
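+	// As documented on Environment.Scheme, the scheme is what decides whether
+	// conversion webhooks get enabled for CRDs whose objects implement Hub/Spoke.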
+ if te.Scheme == nil { + te.Scheme = scheme.Scheme + } + + // Call PrepWithoutInstalling to setup certificates first + // and have them available to patch CRD conversion webhook as well. + if err := te.WebhookInstallOptions.PrepWithoutInstalling(); err != nil { + return nil, err + } + + log.V(1).Info("installing CRDs") + te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs) + te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths) + te.CRDInstallOptions.ErrorIfPathMissing = te.ErrorIfCRDPathMissing + te.CRDInstallOptions.WebhookOptions = te.WebhookInstallOptions + crds, err := InstallCRDs(te.Config, te.CRDInstallOptions) + if err != nil { + return te.Config, fmt.Errorf("unable to install CRDs onto control plane: %w", err) + } + te.CRDs = crds + + log.V(1).Info("installing webhooks") + if err := te.WebhookInstallOptions.Install(te.Config); err != nil { + return nil, fmt.Errorf("unable to install webhooks onto control plane: %w", err) + } + return te.Config, nil +} + +// AddUser provisions a new user for connecting to this Environment. The user will +// have the specified name & belong to the specified groups. +// +// If you specify a "base" config, the returned REST Config will contain those +// settings as well as any required by the authentication method. You can use +// this to easily specify options like QPS. +// +// This is effectively a convinience alias for ControlPlane.AddUser -- see that +// for more low-level details. +func (te *Environment) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + return te.ControlPlane.AddUser(user, baseConfig) +} + +func (te *Environment) startControlPlane() error { + numTries, maxRetries := 0, 5 + var err error + for ; numTries < maxRetries; numTries++ { + // Start the control plane - retry if it fails + err = te.ControlPlane.Start() + if err == nil { + break + } + log.Error(err, "unable to start the controlplane", "tries", numTries) + } + if numTries == maxRetries { + return fmt.Errorf("failed to start the controlplane. retried %d times: %w", numTries, err) + } + return nil +} + +func (te *Environment) defaultTimeouts() error { + var err error + if te.ControlPlaneStartTimeout == 0 { + if envVal := os.Getenv(envStartTimeout); envVal != "" { + te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStartTimeout = defaultKubebuilderControlPlaneStartTimeout + } + } + + if te.ControlPlaneStopTimeout == 0 { + if envVal := os.Getenv(envStopTimeout); envVal != "" { + te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStopTimeout = defaultKubebuilderControlPlaneStopTimeout + } + } + return nil +} + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + return *te.UseExistingCluster +} + +// DefaultKubeAPIServerFlags exposes the default args for the APIServer so that +// you can use those to append your own additional arguments. +// +// Deprecated: use APIServer.Configure() instead. 
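+// An illustrative replacement using the Configure API (the flag name is a placeholder):
+//
+//	testEnv.ControlPlane.GetAPIServer().Configure().
+//		Append("feature-gates", "AllAlpha=true")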
+var DefaultKubeAPIServerFlags = controlplane.APIServerDefaultArgs //nolint:staticcheck diff --git a/pkg/envtest/testdata/crds/examplecrd3.yaml b/pkg/envtest/testdata/crds/examplecrd3.yaml new file mode 100644 index 0000000000..479a6e5645 --- /dev/null +++ b/pkg/envtest/testdata/crds/examplecrd3.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: configs.foo.example.com +spec: + group: foo.example.com + names: + kind: Config + plural: configs + scope: Namespaced + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/envtest/testdata/crds/examplecrd_unserved.yaml b/pkg/envtest/testdata/crds/examplecrd_unserved.yaml new file mode 100644 index 0000000000..09fac4f080 --- /dev/null +++ b/pkg/envtest/testdata/crds/examplecrd_unserved.yaml @@ -0,0 +1,88 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + name: frigates.ship.example.com +spec: + group: ship.example.com + names: + kind: Frigate + listKind: FrigateList + plural: frigates + singular: frigate + scope: Namespaced + subresources: + status: {} + versions: + - name: v1 + served: false + storage: true + schema: + openAPIV3Schema: + description: Frigate is the Schema for the frigates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FrigateSpec defines the desired state of Frigate + properties: + foo: + description: Foo is an example field of Frigate. Edit Frigate_types.go + to remove/update + type: string + type: object + status: + description: FrigateStatus defines the observed state of Frigate + type: object + type: object + - name: v1beta1 + served: false + storage: false + schema: + openAPIV3Schema: + description: Frigate is the Schema for the frigates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FrigateSpec defines the desired state of Frigate + properties: + foo: + description: Foo is an example field of Frigate. 
Edit Frigate_types.go + to remove/update + type: string + type: object + status: + description: FrigateStatus defines the observed state of Frigate + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/crdv1_original/example_multiversion_crd1.yaml b/pkg/envtest/testdata/crdv1_original/example_multiversion_crd1.yaml new file mode 100644 index 0000000000..5dead8186a --- /dev/null +++ b/pkg/envtest/testdata/crdv1_original/example_multiversion_crd1.yaml @@ -0,0 +1,61 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: drivers.crew.example.com +spec: + group: crew.example.com + names: + kind: Driver + plural: drivers + scope: "" + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/crdv1_updated/example_multiversion_crd1_one_more_version.yaml b/pkg/envtest/testdata/crdv1_updated/example_multiversion_crd1_one_more_version.yaml new file mode 100644 index 0000000000..9eb0ec91a2 --- /dev/null +++ b/pkg/envtest/testdata/crdv1_updated/example_multiversion_crd1_one_more_version.yaml @@ -0,0 +1,83 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: drivers.crew.example.com +spec: + group: crew.example.com + names: + kind: Driver + plural: drivers + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v3 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/example_multiversion_crd1.yaml b/pkg/envtest/testdata/example_multiversion_crd1.yaml new file mode 100644 index 0000000000..5bb2d73f69 --- /dev/null +++ b/pkg/envtest/testdata/example_multiversion_crd1.yaml @@ -0,0 +1,62 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: drivers.crew.example.com +spec: + group: crew.example.com + names: + kind: Driver + plural: drivers + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/examplecrd.yaml b/pkg/envtest/testdata/examplecrd.yaml new file mode 100644 index 0000000000..f1638f8310 --- /dev/null +++ b/pkg/envtest/testdata/examplecrd.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bazs.qux.example.com +spec: + group: qux.example.com + names: + kind: Baz + plural: bazs + scope: Namespaced + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/envtest/testdata/examplecrd_v1.yaml b/pkg/envtest/testdata/examplecrd_v1.yaml new file mode 100644 index 0000000000..e2bddbc528 --- /dev/null +++ b/pkg/envtest/testdata/examplecrd_v1.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: foos.bar.example.com +spec: + group: bar.example.com + names: + kind: Foo + plural: foos + scope: Namespaced + versions: + - name: "v1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/envtest/testdata/multiplecrds.yaml b/pkg/envtest/testdata/multiplecrds.yaml new file mode 100644 index 0000000000..a855140ead --- /dev/null +++ b/pkg/envtest/testdata/multiplecrds.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: captains.crew.example.com +spec: + group: crew.example.com + names: + kind: Captain + plural: captains + scope: Namespaced + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: firstmates.crew.example.com +spec: + group: crew.example.com + names: + kind: FirstMate + plural: firstmates + scope: Namespaced + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object +--- diff --git a/pkg/envtest/testdata/notcrd.yaml b/pkg/envtest/testdata/notcrd.yaml new file mode 100644 index 0000000000..a0f1f582c8 --- /dev/null +++ b/pkg/envtest/testdata/notcrd.yaml @@ -0,0 +1,18 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 diff --git a/pkg/envtest/testdata/webhooks/manifests.yaml 
b/pkg/envtest/testdata/webhooks/manifests.yaml new file mode 100644 index 0000000000..72437905cd --- /dev/null +++ b/pkg/envtest/testdata/webhooks/manifests.yaml @@ -0,0 +1,101 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-v1 + failurePolicy: Fail + name: mpods.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration2 +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-v1 + failurePolicy: Fail + name: mpods2.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-v1 + failurePolicy: Fail + name: vpods.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration2 +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-v1 + failurePolicy: Fail + name: vpods2.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + diff --git a/pkg/envtest/webhook.go b/pkg/envtest/webhook.go new file mode 100644 index 0000000000..9b763b6c24 --- /dev/null +++ b/pkg/envtest/webhook.go @@ -0,0 +1,433 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "time" + + admissionv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// WebhookInstallOptions are the options for installing mutating or validating webhooks. 
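+// An illustrative configuration, mirroring the test data shipped with this package:
+//
+//	testEnv := &Environment{
+//		WebhookInstallOptions: WebhookInstallOptions{
+//			Paths: []string{filepath.Join("testdata", "webhooks")},
+//		},
+//	}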
+type WebhookInstallOptions struct { + // Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs. + Paths []string + + // MutatingWebhooks is a list of MutatingWebhookConfigurations to install + MutatingWebhooks []*admissionv1.MutatingWebhookConfiguration + + // ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install + ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration + + // IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true + IgnoreErrorIfPathMissing bool + + // LocalServingHost is the host for serving webhooks on. + // it will be automatically populated + LocalServingHost string + + // LocalServingPort is the allocated port for serving webhooks on. + // it will be automatically populated by a random available local port + LocalServingPort int + + // LocalServingCertDir is the allocated directory for serving certificates. + // it will be automatically populated by the local temp dir + LocalServingCertDir string + + // CAData is the CA that can be used to trust the serving certificates in LocalServingCertDir. + LocalServingCAData []byte + + // LocalServingHostExternalName is the hostname to use to reach the webhook server. + LocalServingHostExternalName string + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration +} + +// ModifyWebhookDefinitions modifies webhook definitions by: +// - applying CABundle based on the provided tinyca +// - if webhook client config uses service spec, it's removed and replaced with direct url. +func (o *WebhookInstallOptions) ModifyWebhookDefinitions() error { + caData := o.LocalServingCAData + + // generate host port. + hostPort, err := o.generateHostPort() + if err != nil { + return err + } + + for i := range o.MutatingWebhooks { + for j := range o.MutatingWebhooks[i].Webhooks { + updateClientConfig(&o.MutatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + + for i := range o.ValidatingWebhooks { + for j := range o.ValidatingWebhooks[i].Webhooks { + updateClientConfig(&o.ValidatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + return nil +} + +func updateClientConfig(cc *admissionv1.WebhookClientConfig, hostPort string, caData []byte) { + cc.CABundle = caData + if cc.Service != nil && cc.Service.Path != nil { + url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path) + cc.URL = &url + cc.Service = nil + } +} + +func (o *WebhookInstallOptions) generateHostPort() (string, error) { + if o.LocalServingPort == 0 { + port, host, err := addr.Suggest(o.LocalServingHost) + if err != nil { + return "", fmt.Errorf("unable to grab random port for serving webhooks on: %w", err) + } + o.LocalServingPort = port + o.LocalServingHost = host + } + host := o.LocalServingHostExternalName + if host == "" { + host = o.LocalServingHost + } + return net.JoinHostPort(host, fmt.Sprintf("%d", o.LocalServingPort)), nil +} + +// PrepWithoutInstalling does the setup parts of Install (populating host-port, +// setting up CAs, etc), without actually truing to do anything with webhook +// definitions. This is largely useful for internal testing of +// controller-runtime, where we need a random host-port & caData for webhook +// tests, but may be useful in similar scenarios. 
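+// Once prepared, the test's webhook server is typically pointed at the generated
+// host, port and certificate directory (a sketch mirroring the test in this package;
+// cfg and opts are placeholders):
+//
+//	m, err := manager.New(cfg, manager.Options{
+//		Host:    opts.LocalServingHost,
+//		Port:    opts.LocalServingPort,
+//		CertDir: opts.LocalServingCertDir,
+//	})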
+func (o *WebhookInstallOptions) PrepWithoutInstalling() error { + if err := o.setupCA(); err != nil { + return err + } + + if err := parseWebhook(o); err != nil { + return err + } + + return o.ModifyWebhookDefinitions() +} + +// Install installs specified webhooks to the API server. +func (o *WebhookInstallOptions) Install(config *rest.Config) error { + if len(o.LocalServingCAData) == 0 { + if err := o.PrepWithoutInstalling(); err != nil { + return err + } + } + + if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil { + return err + } + + return WaitForWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks, *o) +} + +// Cleanup cleans up cert directories. +func (o *WebhookInstallOptions) Cleanup() error { + if o.LocalServingCertDir != "" { + return os.RemoveAll(o.LocalServingCertDir) + } + return nil +} + +// WaitForWebhooks waits for the Webhooks to be available through API server. +func WaitForWebhooks(config *rest.Config, + mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, + validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration, + options WebhookInstallOptions) error { + waitingFor := map[schema.GroupVersionKind]*sets.String{} + + for _, hook := range mutatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for MutatingWebhookConfiguration %s: %w", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.String{} + } + waitingFor[gvk].Insert(h.GetName()) + } + + for _, hook := range validatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for ValidatingWebhookConfiguration %s: %w", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.String{} + } + waitingFor[gvk].Insert(hook.GetName()) + } + + // Poll until all resources are found in discovery + p := &webhookPoller{config: config, waitingFor: waitingFor} + return wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type webhookPoller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersionKind]*sets.String +} + +// poll checks if all the resources have been found in discovery, and returns false if not. +func (p *webhookPoller) poll() (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + c, err := client.New(p.config, client.Options{}) + if err != nil { + return false, err + } + + allFound := true + for gvk, names := range p.waitingFor { + if names.Len() == 0 { + delete(p.waitingFor, gvk) + continue + } + for _, name := range names.List() { + var obj = &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + err := c.Get(context.Background(), client.ObjectKey{ + Namespace: "", + Name: name, + }, obj) + + if err == nil { + names.Delete(name) + } + + if apierrors.IsNotFound(err) { + allFound = false + } + if err != nil { + return false, err + } + } + } + return allFound, nil +} + +// setupCA creates CA for testing and writes them to disk. 
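+// The serving certificate and key are written as tls.crt and tls.key inside
+// LocalServingCertDir, which matches the default file names expected by pkg/webhook's server.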
+func (o *WebhookInstallOptions) setupCA() error { + hookCA, err := certs.NewTinyCA() + if err != nil { + return fmt.Errorf("unable to set up webhook CA: %w", err) + } + + names := []string{"localhost", o.LocalServingHost, o.LocalServingHostExternalName} + hookCert, err := hookCA.NewServingCert(names...) + if err != nil { + return fmt.Errorf("unable to set up webhook serving certs: %w", err) + } + + localServingCertsDir, err := os.MkdirTemp("", "envtest-serving-certs-") + o.LocalServingCertDir = localServingCertsDir + if err != nil { + return fmt.Errorf("unable to create directory for webhook serving certs: %w", err) + } + + certData, keyData, err := hookCert.AsBytes() + if err != nil { + return fmt.Errorf("unable to marshal webhook serving certs: %w", err) + } + + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to write webhook serving cert to disk: %w", err) + } + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to write webhook serving key to disk: %w", err) + } + + o.LocalServingCAData = certData + return err +} + +func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhookConfiguration, valHooks []*admissionv1.ValidatingWebhookConfiguration) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Create each webhook + for _, hook := range mutHooks { + hook := hook + log.V(1).Info("installing mutating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + for _, hook := range valHooks { + hook := hook + log.V(1).Info("installing validating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + return nil +} + +// ensureCreated creates or update object if already exists in the cluster. +func ensureCreated(cs client.Client, obj client.Object) error { + existing := obj.DeepCopyObject().(client.Object) + err := cs.Get(context.Background(), client.ObjectKey{Name: obj.GetName()}, existing) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.Background(), obj); err != nil { + return err + } + case err != nil: + return err + default: + log.V(1).Info("Webhook configuration already exists, updating", "webhook", obj.GetName()) + obj.SetResourceVersion(existing.GetResourceVersion()) + if err := cs.Update(context.Background(), obj); err != nil { + return err + } + } + return nil +} + +// parseWebhook reads the directories or files of Webhooks in options.Paths and adds the Webhook structs to options. +func parseWebhook(options *WebhookInstallOptions) error { + if len(options.Paths) > 0 { + for _, path := range options.Paths { + _, err := os.Stat(path) + if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + continue // skip this path + } + if !options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + return err // treat missing path as error + } + mutHooks, valHooks, err := readWebhooks(path) + if err != nil { + return err + } + options.MutatingWebhooks = append(options.MutatingWebhooks, mutHooks...) + options.ValidatingWebhooks = append(options.ValidatingWebhooks, valHooks...) + } + } + return nil +} + +// readWebhooks reads the Webhooks from files and Unmarshals them into structs +// returns slice of mutating and validating webhook configurations. 
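+// Only admissionregistration.k8s.io/v1 MutatingWebhookConfiguration and
+// ValidatingWebhookConfiguration documents are accepted: other kinds are skipped,
+// while older API versions of these kinds return an error
+// (see testdata/webhooks/manifests.yaml for working examples).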
+func readWebhooks(path string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) { + // Get the webhook files + var files []string + var err error + log.V(1).Info("reading Webhooks from path", "path", path) + info, err := os.Stat(path) + if err != nil { + return nil, nil, err + } + if !info.IsDir() { + path, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + // file extensions that may contain Webhooks + resourceExtensions := sets.NewString(".json", ".yaml", ".yml") + + var mutHooks []*admissionv1.MutatingWebhookConfiguration + var valHooks []*admissionv1.ValidatingWebhookConfiguration + for _, file := range files { + // Only parse allowlisted file types + if !resourceExtensions.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal Webhooks from file into structs + docs, err := readDocuments(filepath.Join(path, file)) + if err != nil { + return nil, nil, err + } + + for _, doc := range docs { + var generic metav1.PartialObjectMetadata + if err = yaml.Unmarshal(doc, &generic); err != nil { + return nil, nil, err + } + + const ( + admissionregv1 = "admissionregistration.k8s.io/v1" + ) + switch { + case generic.Kind == "MutatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for MutatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.MutatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + mutHooks = append(mutHooks, hook) + case generic.Kind == "ValidatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for ValidatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.ValidatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + valHooks = append(valHooks, hook) + default: + continue + } + } + + log.V(1).Info("read webhooks from file", "file", file) + } + return mutHooks, valHooks, nil +} diff --git a/pkg/envtest/webhook_test.go b/pkg/envtest/webhook_test.go new file mode 100644 index 0000000000..6f900bb7fb --- /dev/null +++ b/pkg/envtest/webhook_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ = Describe("Test", func() { + + Describe("Webhook", func() { + It("should reject create request for webhook that rejects all requests", func() { + m, err := manager.New(env.Config, manager.Options{ + Port: env.WebhookInstallOptions.LocalServingPort, + Host: env.WebhookInstallOptions.LocalServingHost, + CertDir: env.WebhookInstallOptions.LocalServingCertDir, + }) // we need manager here just to leverage manager.SetFields + Expect(err).NotTo(HaveOccurred()) + server := m.GetWebhookServer() + server.Register("/failing", &webhook.Admission{Handler: &rejectingValidator{}}) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + _ = server.Start(ctx) + }() + + c, err := client.New(env.Config, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + + Eventually(func() bool { + err = c.Create(context.TODO(), obj) + return apierrors.ReasonForError(err) == metav1.StatusReason("Always denied") + }, 1*time.Second).Should(BeTrue()) + + cancel() + }) + + It("should load webhooks from directory", func() { + installOptions := WebhookInstallOptions{ + Paths: []string{filepath.Join("testdata", "webhooks")}, + } + err := parseWebhook(&installOptions) + Expect(err).NotTo(HaveOccurred()) + Expect(len(installOptions.MutatingWebhooks)).To(Equal(2)) + Expect(len(installOptions.ValidatingWebhooks)).To(Equal(2)) + }) + + It("should load webhooks from files", func() { + installOptions := WebhookInstallOptions{ + Paths: []string{filepath.Join("testdata", "webhooks", "manifests.yaml")}, + } + err := parseWebhook(&installOptions) + Expect(err).NotTo(HaveOccurred()) + Expect(len(installOptions.MutatingWebhooks)).To(Equal(2)) + Expect(len(installOptions.ValidatingWebhooks)).To(Equal(2)) + }) + }) +}) + +type rejectingValidator struct { +} + +func (v *rejectingValidator) Handle(_ context.Context, _ admission.Request) admission.Response { + return admission.Denied("Always denied") +} diff --git a/pkg/event/doc.go b/pkg/event/doc.go new file mode 100644 index 0000000000..adba3bbc16 --- /dev/null +++ b/pkg/event/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package event contains the definitions for the Event types produced by source.Sources and transformed into +reconcile.Requests by handler.EventHandler. + +You should rarely need to work with these directly -- instead, use Controller.Watch with +source.Sources and handler.EventHandlers. + +Events generally contain both a full runtime.Object that caused the event, as well +as a direct handle to that object's metadata. This saves a lot of typecasting in +code that works with Events. +*/ +package event diff --git a/pkg/event/event.go b/pkg/event/event.go new file mode 100644 index 0000000000..271b3c00fb --- /dev/null +++ b/pkg/event/event.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package event + +import "sigs.k8s.io/controller-runtime/pkg/client" + +// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type CreateEvent struct { + // Object is the object from the event + Object client.Object +} + +// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type UpdateEvent struct { + // ObjectOld is the object from the event + ObjectOld client.Object + + // ObjectNew is the object from the event + ObjectNew client.Object +} + +// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type DeleteEvent struct { + // Object is the object from the event + Object client.Object + + // DeleteStateUnknown is true if the Delete event was missed but we identified the object + // as having been deleted. + DeleteStateUnknown bool +} + +// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). +// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an +// handler.EventHandler. +type GenericEvent struct { + // Object is the object from the event + Object client.Object +} diff --git a/pkg/finalizer/finalizer.go b/pkg/finalizer/finalizer.go new file mode 100644 index 0000000000..10c5645dbe --- /dev/null +++ b/pkg/finalizer/finalizer.go @@ -0,0 +1,79 @@ +/* +Copyright 2021 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package finalizer + +import ( + "context" + "fmt" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type finalizers map[string]Finalizer + +// Result struct holds information about what parts of an object were updated by finalizer(s). +type Result struct { + // Updated will be true if at least one of the object's non-status field + // was updated by some registered finalizer. + Updated bool + // StatusUpdated will be true if at least one of the object's status' fields + // was updated by some registered finalizer. + StatusUpdated bool +} + +// NewFinalizers returns the Finalizers interface. +func NewFinalizers() Finalizers { + return finalizers{} +} + +func (f finalizers) Register(key string, finalizer Finalizer) error { + if _, ok := f[key]; ok { + return fmt.Errorf("finalizer for key %q already registered", key) + } + f[key] = finalizer + return nil +} + +func (f finalizers) Finalize(ctx context.Context, obj client.Object) (Result, error) { + var ( + res Result + errList []error + ) + res.Updated = false + for key, finalizer := range f { + if dt := obj.GetDeletionTimestamp(); dt.IsZero() && !controllerutil.ContainsFinalizer(obj, key) { + controllerutil.AddFinalizer(obj, key) + res.Updated = true + } else if !dt.IsZero() && controllerutil.ContainsFinalizer(obj, key) { + finalizerRes, err := finalizer.Finalize(ctx, obj) + if err != nil { + // Even when the finalizer fails, it may need to signal to update the primary + // object (e.g. it may set a condition and need a status update). + res.Updated = res.Updated || finalizerRes.Updated + res.StatusUpdated = res.StatusUpdated || finalizerRes.StatusUpdated + errList = append(errList, fmt.Errorf("finalizer %q failed: %w", key, err)) + } else { + // If the finalizer succeeds, we remove the finalizer from the primary + // object's metadata, so we know it will need an update. + res.Updated = true + controllerutil.RemoveFinalizer(obj, key) + // The finalizer may have updated the status too. + res.StatusUpdated = res.StatusUpdated || finalizerRes.StatusUpdated + } + } + } + return res, kerrors.NewAggregate(errList) +} diff --git a/pkg/finalizer/finalizer_test.go b/pkg/finalizer/finalizer_test.go new file mode 100644 index 0000000000..944acd595a --- /dev/null +++ b/pkg/finalizer/finalizer_test.go @@ -0,0 +1,216 @@ +package finalizer + +import ( + "context" + "fmt" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +type mockFinalizer struct { + result Result + err error +} + +func (f mockFinalizer) Finalize(context.Context, client.Object) (Result, error) { + return f.result, f.err +} +func TestFinalizer(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Finalizer Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = Describe("TestFinalizer", func() { + var err error + var pod *corev1.Pod + var finalizers Finalizers + var f mockFinalizer + BeforeEach(func() { + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{}, + } + finalizers = NewFinalizers() + f = mockFinalizer{} + }) + Describe("Register", func() { + It("successfully registers a finalizer", func() { + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(BeNil()) + }) + + It("should fail when trying to register a finalizer that was already registered", func() { + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(BeNil()) + + // calling Register again with the same key should return an error + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).NotTo(BeNil()) + Expect(err.Error()).To(ContainSubstring("already registered")) + + }) + }) + + Describe("Finalize", func() { + It("successfully finalizes and returns true for Updated when deletion timestamp is nil and finalizer does not exist", func() { + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(BeNil()) + + pod.DeletionTimestamp = nil + pod.Finalizers = []string{} + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).To(BeNil()) + Expect(result.Updated).To(BeTrue()) + // when deletion timestamp is nil and finalizer is not present, the registered finalizer would be added to the obj + Expect(len(pod.Finalizers)).To(Equal(1)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer")) + + }) + + It("successfully finalizes and returns true for Updated when deletion timestamp is not nil and the finalizer exists", func() { + now := metav1.Now() + pod.DeletionTimestamp = &now + + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(BeNil()) + + pod.Finalizers = []string{"finalizers.sigs.k8s.io/testfinalizer"} + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).To(BeNil()) + Expect(result.Updated).To(BeTrue()) + // finalizer will be removed from the obj upon successful finalization + Expect(len(pod.Finalizers)).To(Equal(0)) + }) + + It("should return no error and return false for Updated when deletion timestamp is nil and finalizer doesn't exist", func() { + pod.DeletionTimestamp = nil + pod.Finalizers = []string{} + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).To(BeNil()) + Expect(result.Updated).To(BeFalse()) + Expect(len(pod.Finalizers)).To(Equal(0)) + + }) + + It("should return no error and return false for Updated when deletion timestamp is not nil and the finalizer doesn't exist", func() { + now := metav1.Now() + pod.DeletionTimestamp = &now + pod.Finalizers = []string{} + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).To(BeNil()) + Expect(result.Updated).To(BeFalse()) + Expect(len(pod.Finalizers)).To(Equal(0)) 
+ + }) + + It("successfully finalizes multiple finalizers and returns true for Updated when deletion timestamp is not nil and the finalizer exists", func() { + now := metav1.Now() + pod.DeletionTimestamp = &now + + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(BeNil()) + + err = finalizers.Register("finalizers.sigs.k8s.io/newtestfinalizer", f) + Expect(err).To(BeNil()) + + pod.Finalizers = []string{"finalizers.sigs.k8s.io/testfinalizer", "finalizers.sigs.k8s.io/newtestfinalizer"} + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).To(BeNil()) + Expect(result.Updated).To(BeTrue()) + Expect(result.StatusUpdated).To(BeFalse()) + Expect(len(pod.Finalizers)).To(Equal(0)) + }) + + It("should return result as false and a non-nil error", func() { + now := metav1.Now() + pod.DeletionTimestamp = &now + pod.Finalizers = []string{"finalizers.sigs.k8s.io/testfinalizer"} + + f.result.Updated = false + f.result.StatusUpdated = false + f.err = fmt.Errorf("finalizer failed for %q", pod.Finalizers[0]) + + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(BeNil()) + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring("finalizer failed")) + Expect(result.Updated).To(BeFalse()) + Expect(result.StatusUpdated).To(BeFalse()) + Expect(len(pod.Finalizers)).To(Equal(1)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer")) + }) + + It("should return expected result values and error values when registering multiple finalizers", func() { + now := metav1.Now() + pod.DeletionTimestamp = &now + pod.Finalizers = []string{ + "finalizers.sigs.k8s.io/testfinalizer1", + "finalizers.sigs.k8s.io/testfinalizer2", + "finalizers.sigs.k8s.io/testfinalizer3", + } + + // registering multiple finalizers with different return values + // test for Updated as true, and nil error + f.result.Updated = true + f.result.StatusUpdated = false + f.err = nil + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer1", f) + Expect(err).To(BeNil()) + + result, err := finalizers.Finalize(context.TODO(), pod) + Expect(err).To(BeNil()) + Expect(result.Updated).To(BeTrue()) + Expect(result.StatusUpdated).To(BeFalse()) + // `finalizers.sigs.k8s.io/testfinalizer1` will be removed from the list + // of finalizers, so length will be 2. 
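+			// finalizers.sigs.k8s.io/testfinalizer2 and testfinalizer3 remain on the object because no
+			// finalizer has been registered for those keys yet.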
+ Expect(len(pod.Finalizers)).To(Equal(2)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer2")) + Expect(pod.Finalizers[1]).To(Equal("finalizers.sigs.k8s.io/testfinalizer3")) + + // test for Updated and StatusUpdated as false, and non-nil error + f.result.Updated = false + f.result.StatusUpdated = false + f.err = fmt.Errorf("finalizer failed") + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer2", f) + Expect(err).To(BeNil()) + + result, err = finalizers.Finalize(context.TODO(), pod) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring("finalizer failed")) + Expect(result.Updated).To(BeFalse()) + Expect(result.StatusUpdated).To(BeFalse()) + Expect(len(pod.Finalizers)).To(Equal(2)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer2")) + Expect(pod.Finalizers[1]).To(Equal("finalizers.sigs.k8s.io/testfinalizer3")) + + // test for result as true, and non-nil error + f.result.Updated = true + f.result.StatusUpdated = true + f.err = fmt.Errorf("finalizer failed") + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer3", f) + Expect(err).To(BeNil()) + + result, err = finalizers.Finalize(context.TODO(), pod) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring("finalizer failed")) + Expect(result.Updated).To(BeTrue()) + Expect(result.StatusUpdated).To(BeTrue()) + Expect(len(pod.Finalizers)).To(Equal(2)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer2")) + Expect(pod.Finalizers[1]).To(Equal("finalizers.sigs.k8s.io/testfinalizer3")) + }) + }) +}) diff --git a/pkg/finalizer/types.go b/pkg/finalizer/types.go new file mode 100644 index 0000000000..e3a002a935 --- /dev/null +++ b/pkg/finalizer/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2021 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package finalizer + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Registerer holds Register that will check if a key is already registered +// and error out and it does; and if not registered, it will add the finalizer +// to the finalizers map as the value for the provided key. +type Registerer interface { + Register(key string, f Finalizer) error +} + +// Finalizer holds Finalize that will add/remove a finalizer based on the +// deletion timestamp being set and return an indication of whether the +// obj needs an update or not. +type Finalizer interface { + Finalize(context.Context, client.Object) (Result, error) +} + +// Finalizers implements Registerer and Finalizer to finalize all registered +// finalizers if the provided object has a deletion timestamp or set all +// registered finalizers if it does not. +type Finalizers interface { + Registerer + Finalizer +} diff --git a/pkg/handler/doc.go b/pkg/handler/doc.go new file mode 100644 index 0000000000..e5fd177aff --- /dev/null +++ b/pkg/handler/doc.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package handler defines EventHandlers that enqueue reconcile.Requests in response to Create, Update, Deletion Events +observed from Watching Kubernetes APIs. Users should provide a source.Source and handler.EventHandler to +Controller.Watch in order to generate and enqueue reconcile.Request work items. + +Generally, following premade event handlers should be sufficient for most use cases: + +EventHandlers: + +EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will +cause the object that was the source of the Event (e.g. the created / deleted / updated object) to be +reconciled. + +EnqueueRequestForOwner - Enqueues a reconcile.Request containing the Name and Namespace of the Owner of the object in the Event. +This will cause owner of the object that was the source of the Event (e.g. the owner object that created the object) +to be reconciled. + +EnqueueRequestsFromMapFunc - Enqueues reconcile.Requests resulting from a user provided transformation function run against the +object in the Event. This will cause an arbitrary collection of objects (defined from a transformation of the +source object) to be reconciled. +*/ +package handler diff --git a/pkg/handler/enqueue.go b/pkg/handler/enqueue.go new file mode 100644 index 0000000000..e6d3a4eaab --- /dev/null +++ b/pkg/handler/enqueue.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject") + +type empty struct{} + +var _ EventHandler = &EnqueueRequestForObject{} + +// EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event. +// (e.g. the created / deleted / updated objects Name and Namespace). handler.EnqueueRequestForObject is used by almost all +// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. +type EnqueueRequestForObject struct{} + +// Create implements EventHandler. 
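+// It enqueues a reconcile.Request keyed by the created object's Namespace and Name; events carrying a nil
+// Object are logged and dropped.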
+func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} + +// Update implements EventHandler. +func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + switch { + case evt.ObjectNew != nil: + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectNew.GetName(), + Namespace: evt.ObjectNew.GetNamespace(), + }}) + case evt.ObjectOld != nil: + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectOld.GetName(), + Namespace: evt.ObjectOld.GetNamespace(), + }}) + default: + enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt) + } +} + +// Delete implements EventHandler. +func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} + +// Generic implements EventHandler. +func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} diff --git a/pkg/handler/enqueue_mapped.go b/pkg/handler/enqueue_mapped.go new file mode 100644 index 0000000000..17401b1fdb --- /dev/null +++ b/pkg/handler/enqueue_mapped.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +// MapFunc is the signature required for enqueueing requests from a generic function. +// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. +type MapFunc func(client.Object) []reconcile.Request + +// EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection +// of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects +// defined by some user specified transformation of the source Event. (e.g. 
trigger Reconciler for a set of objects +// in response to a cluster resize event caused by adding or deleting a Node) +// +// EnqueueRequestsFromMapFunc is frequently used to fan-out updates from one object to one or more other +// objects of a differing type. +// +// For UpdateEvents which contain both a new and old object, the transformation function is run on both +// objects and both sets of Requests are enqueue. +func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler { + return &enqueueRequestsFromMapFunc{ + toRequests: fn, + } +} + +var _ EventHandler = &enqueueRequestsFromMapFunc{} + +type enqueueRequestsFromMapFunc struct { + // Mapper transforms the argument into a slice of keys to be reconciled + toRequests MapFunc +} + +// Create implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +// Update implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.ObjectOld, reqs) + e.mapAndEnqueue(q, evt.ObjectNew, reqs) +} + +// Delete implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +// Generic implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(object) { + _, ok := reqs[req] + if !ok { + q.Add(req) + reqs[req] = empty{} + } + } +} + +// EnqueueRequestsFromMapFunc can inject fields into the mapper. + +// InjectFunc implements inject.Injector. +func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { + if f == nil { + return nil + } + return f(e.toRequests) +} diff --git a/pkg/handler/enqueue_owner.go b/pkg/handler/enqueue_owner.go new file mode 100644 index 0000000000..63699893fc --- /dev/null +++ b/pkg/handler/enqueue_owner.go @@ -0,0 +1,189 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handler + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ EventHandler = &EnqueueRequestForOwner{} + +var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") + +// EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created +// the object that was the source of the Event. +// +// If a ReplicaSet creates Pods, users may reconcile the ReplicaSet in response to Pod Events using: +// +// - a source.Kind Source with Type of Pod. +// +// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. +type EnqueueRequestForOwner struct { + // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + OwnerType runtime.Object + + // IsController if set will only look at the first OwnerReference with Controller: true. + IsController bool + + // groupKind is the cached Group and Kind from OwnerType + groupKind schema.GroupKind + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper +} + +// Create implements EventHandler. +func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Update implements EventHandler. +func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.ObjectOld, reqs) + e.getOwnerReconcileRequest(evt.ObjectNew, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Delete implements EventHandler. +func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Generic implements EventHandler. +func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false +// if the OwnerType could not be parsed using the scheme. +func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { + // Get the kinds of the type + kinds, _, err := scheme.ObjectKinds(e.OwnerType) + if err != nil { + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + return err + } + // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. 
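+	// Requiring a single kind keeps the cached GroupKind unambiguous when it is later compared against
+	// each OwnerReference in getOwnerReconcileRequest.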
+ if len(kinds) != 1 { + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + return err + } + // Cache the Group and Kind for the OwnerType + e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} + return nil +} + +// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile +// owners of object that match e.OwnerType. +func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { + // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested + // by the user + for _, ref := range e.getOwnersReferences(object) { + // Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType + refGV, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + log.Error(err, "Could not parse OwnerReference APIVersion", + "api version", ref.APIVersion) + return + } + + // Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user. + // If the two match, create a Request for the objected referred to by + // the OwnerReference. Use the Name from the OwnerReference and the Namespace from the + // object in the event. + if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group { + // Match found - add a Request for the object referred to in the OwnerReference + request := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: ref.Name, + }} + + // if owner is not namespaced then we should set the namespace to the empty + mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) + if err != nil { + log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) + return + } + if mapping.Scope.Name() != meta.RESTScopeNameRoot { + request.Namespace = object.GetNamespace() + } + + result[request] = empty{} + } + } +} + +// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// - if IsController is true: only take the Controller OwnerReference (if found) +// - if IsController is false: take all OwnerReferences. +func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { + if object == nil { + return nil + } + + // If not filtered as Controller only, then use all the OwnerReferences + if !e.IsController { + return object.GetOwnerReferences() + } + // If filtered to a Controller, only take the Controller OwnerReference + if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + return []metav1.OwnerReference{*ownerRef} + } + // No Controller OwnerReference found + return nil +} + +var _ inject.Scheme = &EnqueueRequestForOwner{} + +// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. +func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { + return e.parseOwnerTypeGroupKind(s) +} + +var _ inject.Mapper = &EnqueueRequestForOwner{} + +// InjectMapper is called by the Controller to provide the rest mapper used by the manager. 
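+// The mapper is consulted in getOwnerReconcileRequest to determine whether the owner is cluster scoped,
+// in which case the enqueued request is left without a namespace.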
+func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { + e.mapper = m + return nil +} diff --git a/pkg/handler/eventhandler.go b/pkg/handler/eventhandler.go new file mode 100644 index 0000000000..8652d22d72 --- /dev/null +++ b/pkg/handler/eventhandler.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). EventHandlers map an Event +// for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an +// Event for object with type Foo (using source.KindSource) then reconcile one or more object(s) with type Bar. +// +// Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called. +// +// * Use EnqueueRequestForObject to reconcile the object the event is for +// - do this for events for the type the Controller Reconciles. (e.g. Deployment for a Deployment Controller) +// +// * Use EnqueueRequestForOwner to reconcile the owner of the object the event is for +// - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller) +// +// * Use EnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object +// of a different type - do this for events for types the Controller may be interested in, but doesn't create. +// (e.g. If Foo responds to cluster size events, map Node events to Foo objects.) +// +// Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. +// Most users shouldn't need to implement their own EventHandler. +type EventHandler interface { + // Create is called in response to an create event - e.g. Pod Creation. + Create(event.CreateEvent, workqueue.RateLimitingInterface) + + // Update is called in response to an update event - e.g. Pod Updated. + Update(event.UpdateEvent, workqueue.RateLimitingInterface) + + // Delete is called in response to a delete event - e.g. Pod Deleted. + Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + + // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or + // external trigger request - e.g. reconcile Autoscaling, or a Webhook. + Generic(event.GenericEvent, workqueue.RateLimitingInterface) +} + +var _ EventHandler = Funcs{} + +// Funcs implements EventHandler. +type Funcs struct { + // Create is called in response to an add event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + + // Update is called in response to an update event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. 
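+	// Both the old and the new object are available on the UpdateEvent.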
+ UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + + // Delete is called in response to a delete event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + + // GenericFunc is called in response to a generic event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) +} + +// Create implements EventHandler. +func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + if h.CreateFunc != nil { + h.CreateFunc(e, q) + } +} + +// Delete implements EventHandler. +func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + if h.DeleteFunc != nil { + h.DeleteFunc(e, q) + } +} + +// Update implements EventHandler. +func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + if h.UpdateFunc != nil { + h.UpdateFunc(e, q) + } +} + +// Generic implements EventHandler. +func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + if h.GenericFunc != nil { + h.GenericFunc(e, q) + } +} diff --git a/pkg/handler/eventhandler_suite_test.go b/pkg/handler/eventhandler_suite_test.go new file mode 100644 index 0000000000..ebcc993915 --- /dev/null +++ b/pkg/handler/eventhandler_suite_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestEventhandler(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Eventhandler Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/handler/eventhandler_test.go b/pkg/handler/eventhandler_test.go new file mode 100644 index 0000000000..61db62e66a --- /dev/null +++ b/pkg/handler/eventhandler_test.go @@ -0,0 +1,909 @@ +/* + Copyright 2018 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handler_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("Eventhandler", func() { + var q workqueue.RateLimitingInterface + var instance handler.EnqueueRequestForObject + var pod *corev1.Pod + var mapper meta.RESTMapper + t := true + BeforeEach(func() { + q = controllertest.Queue{Interface: workqueue.New()} + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "biz", Name: "baz"}, + } + Expect(cfg).NotTo(BeNil()) + + var err error + mapper, err = apiutil.NewDiscoveryRESTMapper(cfg) + Expect(err).ShouldNot(HaveOccurred()) + }) + + Describe("EnqueueRequestForObject", func() { + It("should enqueue a Request with the Name / Namespace of the object in the CreateEvent.", func() { + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).NotTo(BeNil()) + req, ok := i.(reconcile.Request) + Expect(ok).To(BeTrue()) + Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) + }) + + It("should enqueue a Request with the Name / Namespace of the object in the DeleteEvent.", func() { + evt := event.DeleteEvent{ + Object: pod, + } + instance.Delete(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).NotTo(BeNil()) + req, ok := i.(reconcile.Request) + Expect(ok).To(BeTrue()) + Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) + }) + + It("should enqueue a Request with the Name / Namespace of one object in the UpdateEvent.", + func() { + newPod := pod.DeepCopy() + newPod.Name = "baz2" + newPod.Namespace = "biz2" + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + instance.Update(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).NotTo(BeNil()) + req, ok := i.(reconcile.Request) + Expect(ok).To(BeTrue()) + Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz2", Name: "baz2"})) + }) + + It("should enqueue a Request with the Name / Namespace of the object in the GenericEvent.", func() { + evt := event.GenericEvent{ + Object: pod, + } + instance.Generic(evt, q) + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).NotTo(BeNil()) + req, ok := i.(reconcile.Request) + Expect(ok).To(BeTrue()) + Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) + }) + + Context("for a runtime.Object without Object", func() { + It("should do nothing if the Object is missing for a CreateEvent.", 
func() { + evt := event.CreateEvent{ + Object: nil, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + + It("should do nothing if the Object is missing for a UpdateEvent.", func() { + newPod := pod.DeepCopy() + newPod.Name = "baz2" + newPod.Namespace = "biz2" + + evt := event.UpdateEvent{ + ObjectNew: newPod, + ObjectOld: nil, + } + instance.Update(evt, q) + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).NotTo(BeNil()) + req, ok := i.(reconcile.Request) + Expect(ok).To(BeTrue()) + Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz2", Name: "baz2"})) + + evt.ObjectNew = nil + evt.ObjectOld = pod + instance.Update(evt, q) + Expect(q.Len()).To(Equal(1)) + i, _ = q.Get() + Expect(i).NotTo(BeNil()) + req, ok = i.(reconcile.Request) + Expect(ok).To(BeTrue()) + Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) + }) + + It("should do nothing if the Object is missing for a DeleteEvent.", func() { + evt := event.DeleteEvent{ + Object: nil, + } + instance.Delete(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + + It("should do nothing if the Object is missing for a GenericEvent.", func() { + evt := event.GenericEvent{ + Object: nil, + } + instance.Generic(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + }) + + Describe("EnqueueRequestsFromMapFunc", func() { + It("should enqueue a Request with the function applied to the CreateEvent.", func() { + req := []reconcile.Request{} + instance := handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { + defer GinkgoRecover() + Expect(a).To(Equal(pod)) + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, + }, + } + return req + }) + + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(2)) + + i1, _ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}}, + )) + }) + + It("should enqueue a Request with the function applied to the DeleteEvent.", func() { + req := []reconcile.Request{} + instance := handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { + defer GinkgoRecover() + Expect(a).To(Equal(pod)) + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, + }, + } + return req + }) + + evt := event.DeleteEvent{ + Object: pod, + } + instance.Delete(evt, q) + Expect(q.Len()).To(Equal(2)) + + i1, _ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}}, + )) + }) + + It("should enqueue a Request with the function applied to both objects in the UpdateEvent.", + func() { + newPod := pod.DeepCopy() + + req := []reconcile.Request{} + + instance := handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { + defer GinkgoRecover() + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: a.GetName() + "-bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", 
Name: a.GetName() + "-baz"}, + }, + } + return req + }) + + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + instance.Update(evt, q) + Expect(q.Len()).To(Equal(2)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "baz-bar"}})) + + i, _ = q.Get() + Expect(i).To(Equal(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz-baz"}})) + }) + + It("should enqueue a Request with the function applied to the GenericEvent.", func() { + req := []reconcile.Request{} + instance := handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { + defer GinkgoRecover() + Expect(a).To(Equal(pod)) + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, + }, + } + return req + }) + + evt := event.GenericEvent{ + Object: pod, + } + instance.Generic(evt, q) + Expect(q.Len()).To(Equal(2)) + + i1, _ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}}, + )) + }) + }) + + Describe("EnqueueRequestForOwner", func() { + It("should enqueue a Request with the Owner of the object in the CreateEvent.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) + }) + + It("should enqueue a Request with the Owner of the object in the DeleteEvent.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.DeleteEvent{ + Object: pod, + } + instance.Delete(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) + }) + + It("should enqueue a Request with the Owners of both objects in the UpdateEvent.", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + newPod.Namespace = pod.Namespace + "2" + + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + newPod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo2-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + instance.Update(evt, q) + Expect(q.Len()).To(Equal(2)) + + i1, 
_ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo1-parent"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: newPod.GetNamespace(), Name: "foo2-parent"}}, + )) + }) + + It("should enqueue a Request with the one duplicate Owner of both objects in the UpdateEvent.", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + newPod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + instance.Update(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) + }) + + It("should enqueue a Request with the Owner of the object in the GenericEvent.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.GenericEvent{ + Object: pod, + } + instance.Generic(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) + }) + + It("should not enqueue a Request if there are no owners matching Group and Kind.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + IsController: t, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { // Wrong group + Name: "foo1-parent", + Kind: "ReplicaSet", + APIVersion: "extensions/v1", + }, + { // Wrong kind + Name: "foo2-parent", + Kind: "Deployment", + APIVersion: "apps/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + + It("should enqueue a Request if there are owners matching Group "+ + "and Kind with a different version.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &autoscalingv1.HorizontalPodAutoscaler{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "HorizontalPodAutoscaler", + APIVersion: "autoscaling/v2beta1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) + }) + + It("should enqueue a Request for a owner that is cluster scoped", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &corev1.Node{}, + } + 
Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "node-1", + Kind: "Node", + APIVersion: "v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "", Name: "node-1"}})) + + }) + + It("should not enqueue a Request if there are no owners.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + + Context("with the Controller field set to true", func() { + It("should enqueue reconcile.Requests for only the first the Controller if there are "+ + "multiple Controller owners.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + IsController: t, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + { + Name: "foo2-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + Controller: &t, + }, + { + Name: "foo3-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + { + Name: "foo4-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + Controller: &t, + }, + { + Name: "foo5-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(1)) + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo2-parent"}})) + }) + + It("should not enqueue reconcile.Requests if there are no Controller owners.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + IsController: t, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + { + Name: "foo2-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + { + Name: "foo3-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + + It("should not enqueue reconcile.Requests if there are no owners.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + IsController: t, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + + Context("with the Controller field set to false", func() { + It("should enqueue a reconcile.Requests for all owners.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: 
"ReplicaSet", + APIVersion: "apps/v1", + }, + { + Name: "foo2-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + { + Name: "foo3-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(3)) + + i1, _ := q.Get() + i2, _ := q.Get() + i3, _ := q.Get() + Expect([]interface{}{i1, i2, i3}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo1-parent"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo2-parent"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo3-parent"}}, + )) + }) + }) + + Context("with a nil object", func() { + It("should do nothing.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.CreateEvent{ + Object: nil, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + + Context("with a multiple matching kinds", func() { + It("should do nothing.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &metav1.ListOptions{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).NotTo(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ListOptions", + APIVersion: "meta/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + Context("with an OwnerType that cannot be resolved", func() { + It("should do nothing.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &controllertest.ErrorType{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).NotTo(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ListOptions", + APIVersion: "meta/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + + Context("with a nil OwnerType", func() { + It("should do nothing.", func() { + instance := handler.EnqueueRequestForOwner{} + Expect(instance.InjectScheme(scheme.Scheme)).NotTo(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "OwnerType", + APIVersion: "meta/v1", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + + Context("with an invalid APIVersion in the OwnerReference", func() { + It("should do nothing.", func() { + instance := handler.EnqueueRequestForOwner{ + OwnerType: &appsv1.ReplicaSet{}, + } + Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) + Expect(instance.InjectMapper(mapper)).To(Succeed()) + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo1-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1/fail", + }, + } + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + Expect(q.Len()).To(Equal(0)) + }) + }) + }) + + Describe("Funcs", func() { + failingFuncs := handler.Funcs{ + CreateFunc: 
func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect CreateEvent to be called.") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect DeleteEvent to be called.") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect UpdateEvent to be called.") + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect GenericEvent to be called.") + }, + } + + It("should call CreateFunc for a CreateEvent if provided.", func() { + instance := failingFuncs + evt := event.CreateEvent{ + Object: pod, + } + instance.CreateFunc = func(evt2 event.CreateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt2).To(Equal(evt)) + } + instance.Create(evt, q) + }) + + It("should NOT call CreateFunc for a CreateEvent if NOT provided.", func() { + instance := failingFuncs + instance.CreateFunc = nil + evt := event.CreateEvent{ + Object: pod, + } + instance.Create(evt, q) + }) + + It("should call UpdateFunc for an UpdateEvent if provided.", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + newPod.Namespace = pod.Namespace + "2" + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + + instance := failingFuncs + instance.UpdateFunc = func(evt2 event.UpdateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt2).To(Equal(evt)) + } + + instance.Update(evt, q) + }) + + It("should NOT call UpdateFunc for an UpdateEvent if NOT provided.", func() { + newPod := pod.DeepCopy() + newPod.Name = pod.Name + "2" + newPod.Namespace = pod.Namespace + "2" + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + instance.Update(evt, q) + }) + + It("should call DeleteFunc for a DeleteEvent if provided.", func() { + instance := failingFuncs + evt := event.DeleteEvent{ + Object: pod, + } + instance.DeleteFunc = func(evt2 event.DeleteEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt2).To(Equal(evt)) + } + instance.Delete(evt, q) + }) + + It("should NOT call DeleteFunc for a DeleteEvent if NOT provided.", func() { + instance := failingFuncs + instance.DeleteFunc = nil + evt := event.DeleteEvent{ + Object: pod, + } + instance.Delete(evt, q) + }) + + It("should call GenericFunc for a GenericEvent if provided.", func() { + instance := failingFuncs + evt := event.GenericEvent{ + Object: pod, + } + instance.GenericFunc = func(evt2 event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt2).To(Equal(evt)) + } + instance.Generic(evt, q) + }) + + It("should NOT call GenericFunc for a GenericEvent if NOT provided.", func() { + instance := failingFuncs + instance.GenericFunc = nil + evt := event.GenericEvent{ + Object: pod, + } + instance.Generic(evt, q) + }) + }) +}) diff --git a/pkg/handler/example_test.go b/pkg/handler/example_test.go new file mode 100644 index 0000000000..dbfab46157 --- /dev/null +++ b/pkg/handler/example_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package handler_test
+
+import (
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var c controller.Controller
+
+// This example watches Pods and enqueues Requests with the Name and Namespace of the Pod from
+// the Event (i.e. change caused by a Create, Update, Delete).
+func ExampleEnqueueRequestForObject() {
+	// controller is a controller.controller
+	err := c.Watch(
+		&source.Kind{Type: &corev1.Pod{}},
+		&handler.EnqueueRequestForObject{},
+	)
+	if err != nil {
+		// handle it
+	}
+}
+
+// This example watches ReplicaSets and enqueues a Request containing the Name and Namespace of the
+// owning (direct) Deployment responsible for the creation of the ReplicaSet.
+func ExampleEnqueueRequestForOwner() {
+	// controller is a controller.controller
+	err := c.Watch(
+		&source.Kind{Type: &appsv1.ReplicaSet{}},
+		&handler.EnqueueRequestForOwner{
+			OwnerType:    &appsv1.Deployment{},
+			IsController: true,
+		},
+	)
+	if err != nil {
+		// handle it
+	}
+}
+
+// This example watches Deployments and enqueues a Request containing the Name and Namespace of different
+// objects (of Type: MyKind) using a mapping function defined by the user.
+func ExampleEnqueueRequestsFromMapFunc() {
+	// controller is a controller.controller
+	err := c.Watch(
+		&source.Kind{Type: &appsv1.Deployment{}},
+		handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request {
+			return []reconcile.Request{
+				{NamespacedName: types.NamespacedName{
+					Name:      a.GetName() + "-1",
+					Namespace: a.GetNamespace(),
+				}},
+				{NamespacedName: types.NamespacedName{
+					Name:      a.GetName() + "-2",
+					Namespace: a.GetNamespace(),
+				}},
+			}
+		}),
+	)
+	if err != nil {
+		// handle it
+	}
+}
+
+// This example uses handler.Funcs to implement the same behavior as handler.EnqueueRequestForObject.
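+// Funcs fields that are left nil are treated as no-ops for that event type.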
+func ExampleFuncs() { + // controller is a controller.controller + err := c.Watch( + &source.Kind{Type: &corev1.Pod{}}, + handler.Funcs{ + CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.Object.GetName(), + Namespace: e.Object.GetNamespace(), + }}) + }, + UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.ObjectNew.GetName(), + Namespace: e.ObjectNew.GetNamespace(), + }}) + }, + DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.Object.GetName(), + Namespace: e.Object.GetNamespace(), + }}) + }, + GenericFunc: func(e event.GenericEvent, q workqueue.RateLimitingInterface) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.Object.GetName(), + Namespace: e.Object.GetNamespace(), + }}) + }, + }, + ) + if err != nil { + // handle it + } +} diff --git a/pkg/healthz/doc.go b/pkg/healthz/doc.go new file mode 100644 index 0000000000..9827eeafed --- /dev/null +++ b/pkg/healthz/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz contains helpers from supporting liveness and readiness endpoints. +// (often referred to as healthz and readyz, respectively). +// +// This package draws heavily from the apiserver's healthz package +// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz ) +// but has some changes to bring it in line with controller-runtime's style. +// +// The main entrypoint is the Handler -- this serves both aggregated health status +// and individual health check endpoints. +package healthz + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("healthz") diff --git a/pkg/healthz/healthz.go b/pkg/healthz/healthz.go new file mode 100644 index 0000000000..bd1cc151af --- /dev/null +++ b/pkg/healthz/healthz.go @@ -0,0 +1,206 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "fmt" + "net/http" + "path" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Handler is an http.Handler that aggregates the results of the given +// checkers to the root path, and supports calling individual checkers on +// subpaths of the name of the checker. 
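+//
+// For example, a caller could serve a single "ping" check like this (a minimal
+// usage sketch; the listen address is arbitrary):
+//
+//	h := &Handler{Checks: map[string]Checker{"ping": Ping}}
+//	go func() { _ = http.ListenAndServe(":8081", h) }()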
+//
+// Adding checks on the fly is *not* threadsafe -- use a wrapper.
+type Handler struct {
+	Checks map[string]Checker
+}
+
+// checkStatus holds the output of a particular check.
+type checkStatus struct {
+	name     string
+	healthy  bool
+	excluded bool
+}
+
+func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) {
+	failed := false
+	excluded := getExcludedChecks(req)
+
+	parts := make([]checkStatus, 0, len(h.Checks))
+
+	// calculate the results...
+	for checkName, check := range h.Checks {
+		// no-op the check if we've specified we want to exclude the check
+		if excluded.Has(checkName) {
+			excluded.Delete(checkName)
+			parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true})
+			continue
+		}
+		if err := check(req); err != nil {
+			log.V(1).Info("healthz check failed", "checker", checkName, "error", err)
+			parts = append(parts, checkStatus{name: checkName, healthy: false})
+			failed = true
+		} else {
+			parts = append(parts, checkStatus{name: checkName, healthy: true})
+		}
+	}
+
+	// ...default a check if none is present...
+	if len(h.Checks) == 0 {
+		parts = append(parts, checkStatus{name: "ping", healthy: true})
+	}
+
+	for _, c := range excluded.List() {
+		log.V(1).Info("cannot exclude health check, no matches for it", "checker", c)
+	}
+
+	// ...sort to be consistent...
+	sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name })
+
+	// ...and write out the result
+	// TODO(directxman12): this should also accept a request for JSON content (via an Accept header)
+	_, forceVerbose := req.URL.Query()["verbose"]
+	writeStatusesAsText(resp, parts, excluded, failed, forceVerbose)
+}
+
+// writeStatusesAsText writes out the given check statuses in some semi-arbitrary
+// bespoke text format that we copied from Kubernetes. unknownExcludes lists
+// any checks that the user requested to have excluded, but weren't actually
+// known checks. writeStatusesAsText is always verbose on failure, and can be
+// forced to be verbose on success using the given argument.
+func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) {
+	resp.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	resp.Header().Set("X-Content-Type-Options", "nosniff")
+
+	// always write status code first
+	if failed {
+		resp.WriteHeader(http.StatusInternalServerError)
+	} else {
+		resp.WriteHeader(http.StatusOK)
+	}
+
+	// shortcut for easy non-verbose success
+	if !failed && !forceVerbose {
+		fmt.Fprint(resp, "ok")
+		return
+	}
+
+	// we're always verbose on failure, so from this point on we're guaranteed to be verbose
+
+	for _, checkOut := range parts {
+		switch {
+		case checkOut.excluded:
+			fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name)
+		case checkOut.healthy:
+			fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name)
+		default:
+			// don't include the error since this endpoint is public. If someone wants more detail
+			// they should have explicit permission to view the detailed checks.
+ fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name) + } + } + + if unknownExcludes.Len() > 0 { + fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...)) + } + + if failed { + log.Info("healthz check failed", "statuses", parts) + fmt.Fprintf(resp, "healthz check failed\n") + } else { + fmt.Fprint(resp, "healthz check passed\n") + } +} + +func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + // clean up the request (duplicating the internal logic of http.ServeMux a bit) + // clean up the path a bit + reqPath := req.URL.Path + if reqPath == "" || reqPath[0] != '/' { + reqPath = "/" + reqPath + } + // path.Clean removes the trailing slash except for root for us + // (which is fine, since we're only serving one layer of sub-paths) + reqPath = path.Clean(reqPath) + + // either serve the root endpoint... + if reqPath == "/" { + h.serveAggregated(resp, req) + return + } + + // ...the default check (if nothing else is present)... + if len(h.Checks) == 0 && reqPath[1:] == "ping" { + CheckHandler{Checker: Ping}.ServeHTTP(resp, req) + return + } + + // ...or an individual checker + checkName := reqPath[1:] // ignore the leading slash + checker, known := h.Checks[checkName] + if !known { + http.NotFoundHandler().ServeHTTP(resp, req) + return + } + + CheckHandler{Checker: checker}.ServeHTTP(resp, req) +} + +// CheckHandler is an http.Handler that serves a health check endpoint at the root path, +// based on its checker. +type CheckHandler struct { + Checker +} + +func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if err := h.Checker(req); err != nil { + http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(resp, "ok") + } +} + +// Checker knows how to perform a health check. +type Checker func(req *http.Request) error + +// Ping returns true automatically when checked. +var Ping Checker = func(_ *http.Request) error { return nil } + +// getExcludedChecks extracts the health check names to be excluded from the query param. +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. +func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") +} diff --git a/pkg/healthz/healthz_suite_test.go b/pkg/healthz/healthz_suite_test.go new file mode 100644 index 0000000000..b51fcb3605 --- /dev/null +++ b/pkg/healthz/healthz_suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHealthz(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Healthz Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/healthz/healthz_test.go b/pkg/healthz/healthz_test.go new file mode 100644 index 0000000000..e0413103f7 --- /dev/null +++ b/pkg/healthz/healthz_test.go @@ -0,0 +1,203 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz_test + +import ( + "errors" + "net/http" + "net/http/httptest" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/healthz" +) + +const ( + contentType = "text/plain; charset=utf-8" +) + +func requestTo(handler http.Handler, dest string) *httptest.ResponseRecorder { + req, err := http.NewRequest("GET", dest, nil) + Expect(err).NotTo(HaveOccurred()) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + + return resp +} + +var _ = Describe("Healthz Handler", func() { + Describe("the aggregated endpoint", func() { + It("should return healthy if all checks succeed", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + It("should return unhealthy if at least one check fails", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + }) + + It("should ingore excluded checks when determining health", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/?exclude=bad1") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + It("should be fine if asked to exclude a check that doesn't exist", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/?exclude=nonexistant") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + Context("when verbose output is requested with ?verbose=true", func() { + It("should return verbose output for ok cases", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/?verbose=true") + Expect(resp.Code).To(Equal(http.StatusOK)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + 
Expect(resp.Body.String()).To(Equal("[+]ok1 ok\n[+]ok2 ok\nhealthz check passed\n")) + }) + + It("should return verbose output for failures", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/?verbose=true") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[-]bad1 failed: reason withheld\n[+]ok1 ok\nhealthz check failed\n")) + }) + }) + + It("should return non-verbose output when healthy and not specified as verbose", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/") + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("ok")) + + }) + + It("should always be verbose if a check fails", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/") + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[-]bad1 failed: reason withheld\n[+]ok1 ok\nhealthz check failed\n")) + }) + + It("should always return a ping endpoint if no other ones are present", func() { + resp := requestTo(&healthz.Handler{}, "/?verbose=true") + Expect(resp.Code).To(Equal(http.StatusOK)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[+]ping ok\nhealthz check passed\n")) + }) + }) + + Describe("the per-check endpoints", func() { + It("should return ok if the requested check is healthy", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "okcheck": healthz.Ping, + }} + + resp := requestTo(handler, "/okcheck") + Expect(resp.Code).To(Equal(http.StatusOK)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("ok")) + }) + + It("should return an error if the requested check is unhealthy", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "failcheck": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/failcheck") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("internal server error: blech\n")) + }) + + It("shouldn't take other checks into account", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "failcheck": func(req *http.Request) error { + return errors.New("blech") + }, + "okcheck": healthz.Ping, + }} + + By("checking the bad endpoint and expecting it to fail") + resp := requestTo(handler, "/failcheck") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + + By("checking the good endpoint and expecting it to succeed") + resp = requestTo(handler, "/okcheck") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + It("should return non-found for paths that don't match a checker", func() { + handler := &healthz.Handler{} + + resp := requestTo(handler, "/doesnotexist") + Expect(resp.Code).To(Equal(http.StatusNotFound)) + }) + + It("should always return a ping endpoint if no other ones are present", func() { + resp := 
requestTo(&healthz.Handler{}, "/ping") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + }) +}) diff --git a/pkg/internal/controller/controller.go b/pkg/internal/controller/controller.go new file mode 100644 index 0000000000..3732eea16e --- /dev/null +++ b/pkg/internal/controller/controller.go @@ -0,0 +1,360 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-logr/logr" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/handler" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ inject.Injector = &Controller{} + +// Controller implements controller.Controller. +type Controller struct { + // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. + Name string + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // Reconciler is a function that can be called at any time with the Name / Namespace of an object and + // ensures that the state of the system matches the state specified in the object. + // Defaults to the DefaultReconcileFunc. + Do reconcile.Reconciler + + // MakeQueue constructs the queue for this controller once the controller is ready to start. + // This exists because the standard Kubernetes workqueues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. + MakeQueue func() workqueue.RateLimitingInterface + + // Queue is an listeningQueue that listens for events from Informers and adds object keys to + // the Queue for processing + Queue workqueue.RateLimitingInterface + + // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates + // Deprecated: the caller should handle injected fields itself. + SetFields func(i interface{}) error + + // mu is used to synchronize Controller setup + mu sync.Mutex + + // Started is true if the Controller has been Started + Started bool + + // ctx is the context that was passed to Start() and used when starting watches. + // + // According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context, + // while we usually always strive to follow best practices, we consider this a legacy case and it should + // undergo a major refactoring and redesign to allow for context to not be stored in a struct. + ctx context.Context + + // CacheSyncTimeout refers to the time limit set on waiting for cache to sync + // Defaults to 2 minutes if not set. 
+ CacheSyncTimeout time.Duration + + // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started. + startWatches []watchDescription + + // LogConstructor is used to construct a logger to then log messages to users during reconciliation, + // or for example when a watch is started. + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of a reconciliation. + LogConstructor func(request *reconcile.Request) logr.Logger + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + RecoverPanic bool +} + +// watchDescription contains all the information necessary to start a watch. +type watchDescription struct { + src source.Source + handler handler.EventHandler + predicates []predicate.Predicate +} + +// Reconcile implements reconcile.Reconciler. +func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { + defer func() { + if r := recover(); r != nil { + if c.RecoverPanic { + for _, fn := range utilruntime.PanicHandlers { + fn(r) + } + err = fmt.Errorf("panic: %v [recovered]", r) + return + } + + log := logf.FromContext(ctx) + log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r)) + panic(r) + } + }() + return c.Do.Reconcile(ctx, req) +} + +// Watch implements controller.Controller. +func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { + c.mu.Lock() + defer c.mu.Unlock() + + // Inject Cache into arguments + if err := c.SetFields(src); err != nil { + return err + } + if err := c.SetFields(evthdler); err != nil { + return err + } + for _, pr := range prct { + if err := c.SetFields(pr); err != nil { + return err + } + } + + // Controller hasn't started yet, store the watches locally and return. + // + // These watches are going to be held on the controller struct until the manager or user calls Start(...). + if !c.Started { + c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct}) + return nil + } + + c.LogConstructor(nil).Info("Starting EventSource", "source", src) + return src.Start(c.ctx, evthdler, c.Queue, prct...) +} + +// Start implements controller.Controller. +func (c *Controller) Start(ctx context.Context) error { + // use an IIFE to get proper lock handling + // but lock outside to get proper handling of the queue shutdown + c.mu.Lock() + if c.Started { + return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times") + } + + c.initMetrics() + + // Set the internal context. + c.ctx = ctx + + c.Queue = c.MakeQueue() + go func() { + <-ctx.Done() + c.Queue.ShutDown() + }() + + wg := &sync.WaitGroup{} + err := func() error { + defer c.mu.Unlock() + + // TODO(pwittrock): Reconsider HandleCrash + defer utilruntime.HandleCrash() + + // NB(directxman12): launch the sources *before* trying to wait for the + // caches to sync so that they have a chance to register their intendeded + // caches. 
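+		// Note: all watches are started here; sources that implement SyncingSource are
+		// additionally waited on in the second loop below.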
+ for _, watch := range c.startWatches { + c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src)) + + if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil { + return err + } + } + + // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches + c.LogConstructor(nil).Info("Starting Controller") + + for _, watch := range c.startWatches { + syncingSource, ok := watch.src.(source.SyncingSource) + if !ok { + continue + } + + if err := func() error { + // use a context with timeout for launching sources and syncing caches. + sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout) + defer cancel() + + // WaitForSync waits for a definitive timeout, and returns if there + // is an error or a timeout + if err := syncingSource.WaitForSync(sourceStartCtx); err != nil { + err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err) + c.LogConstructor(nil).Error(err, "Could not wait for Cache to sync") + return err + } + + return nil + }(); err != nil { + return err + } + } + + // All the watches have been started, we can reset the local slice. + // + // We should never hold watches more than necessary, each watch source can hold a backing cache, + // which won't be garbage collected if we hold a reference to it. + c.startWatches = nil + + // Launch workers to process resources + c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles) + wg.Add(c.MaxConcurrentReconciles) + for i := 0; i < c.MaxConcurrentReconciles; i++ { + go func() { + defer wg.Done() + // Run a worker thread that just dequeues items, processes them, and marks them done. + // It enforces that the reconcileHandler is never invoked concurrently with the same object. + for c.processNextWorkItem(ctx) { + } + }() + } + + c.Started = true + return nil + }() + if err != nil { + return err + } + + <-ctx.Done() + c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish") + wg.Wait() + c.LogConstructor(nil).Info("All workers finished") + return nil +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the reconcileHandler. +func (c *Controller) processNextWorkItem(ctx context.Context) bool { + obj, shutdown := c.Queue.Get() + if shutdown { + // Stop working + return false + } + + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. 
+	defer c.Queue.Done(obj)
+
+	ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1)
+	defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1)
+
+	c.reconcileHandler(ctx, obj)
+	return true
+}
+
+const (
+	labelError        = "error"
+	labelRequeueAfter = "requeue_after"
+	labelRequeue      = "requeue"
+	labelSuccess      = "success"
+)
+
+func (c *Controller) initMetrics() {
+	ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0)
+	ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0)
+	ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0)
+	ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0)
+	ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0)
+	ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0)
+	ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles))
+}
+
+func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
+	// Update metrics after processing each item
+	reconcileStartTS := time.Now()
+	defer func() {
+		c.updateMetrics(time.Since(reconcileStartTS))
+	}()
+
+	// Make sure that the object is a valid request.
+	req, ok := obj.(reconcile.Request)
+	if !ok {
+		// As the item in the workqueue is actually invalid, we call
+		// Forget here else we'd go into a loop of attempting to
+		// process a work item that is invalid.
+		c.Queue.Forget(obj)
+		c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
+		// Return without requeuing the invalid item.
+		return
+	}
+
+	log := c.LogConstructor(&req)
+
+	log = log.WithValues("reconcileID", uuid.NewUUID())
+	ctx = logf.IntoContext(ctx, log)
+
+	// Run the Reconciler, passing it the Namespace/Name of the
+	// resource to be synced.
+	result, err := c.Reconcile(ctx, req)
+	switch {
+	case err != nil:
+		c.Queue.AddRateLimited(req)
+		ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc()
+		ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc()
+		log.Error(err, "Reconciler error")
+	case result.RequeueAfter > 0:
+		// A result.RequeueAfter value is lost if it is returned together with a
+		// non-nil error. This is intended: we need to drive to stable reconcile
+		// loops before requeuing due to result.RequeueAfter.
+		c.Queue.Forget(obj)
+		c.Queue.AddAfter(req, result.RequeueAfter)
+		ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
+	case result.Requeue:
+		c.Queue.AddRateLimited(req)
+		ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc()
+	default:
+		// Finally, if no error occurs we Forget this item so it does not
+		// get queued again until another change happens.
+		c.Queue.Forget(obj)
+		ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc()
+	}
+}
+
+// GetLogger returns this controller's logger.
+func (c *Controller) GetLogger() logr.Logger {
+	return c.LogConstructor(nil)
+}
+
+// InjectFunc implements SetFields.Injector.
+func (c *Controller) InjectFunc(f inject.Func) error {
+	c.SetFields = f
+	return nil
+}
+
+// updateMetrics updates prometheus metrics within the controller.
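+// Currently it only observes the reconcile duration, labelled with the controller name.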
+func (c *Controller) updateMetrics(reconcileTime time.Duration) { + ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) +} diff --git a/pkg/internal/controller/controller_suite_test.go b/pkg/internal/controller/controller_suite_test.go new file mode 100644 index 0000000000..6091dd746c --- /dev/null +++ b/pkg/internal/controller/controller_suite_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Controller internal Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/internal/controller/controller_test.go b/pkg/internal/controller/controller_test.go new file mode 100644 index 0000000000..7825749490 --- /dev/null +++ b/pkg/internal/controller/controller_test.go @@ -0,0 +1,981 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" + "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ = Describe("controller", func() { + var fakeReconcile *fakeReconciler + var ctrl *Controller + var queue *controllertest.Queue + var informers *informertest.FakeInformers + var reconciled chan reconcile.Request + var request = reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + } + + BeforeEach(func() { + reconciled = make(chan reconcile.Request) + fakeReconcile = &fakeReconciler{ + Requests: reconciled, + results: make(chan fakeReconcileResultPair, 10 /* chosen by the completely scientific approach of guessing */), + } + queue = &controllertest.Queue{ + Interface: workqueue.New(), + } + informers = &informertest.FakeInformers{} + ctrl = &Controller{ + MaxConcurrentReconciles: 1, + Do: fakeReconcile, + MakeQueue: func() workqueue.RateLimitingInterface { return queue }, + LogConstructor: func(_ *reconcile.Request) logr.Logger { + return log.RuntimeLog.WithName("controller").WithName("test") + }, + } + Expect(ctrl.InjectFunc(func(interface{}) error { return nil })).To(Succeed()) + }) + + Describe("Reconciler", func() { + It("should call the Reconciler function", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{Requeue: true}, nil + }) + result, err := ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal(reconcile.Result{Requeue: true})) + }) + + It("should not recover panic if RecoverPanic is false by default", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + defer func() { + Expect(recover()).ShouldNot(BeNil()) + }() + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + var res *reconcile.Result + return *res, nil + }) + _, _ = ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + }) + + It("should recover panic if RecoverPanic is true", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + defer func() { + Expect(recover()).To(BeNil()) + }() + ctrl.RecoverPanic = true + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + var res *reconcile.Result + return *res, nil + }) + _, err := ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + Expect(err).To(HaveOccurred()) + 
Expect(err.Error()).To(ContainSubstring("[recovered]")) + }) + }) + + Describe("Start", func() { + It("should return an error if there is an error waiting for the informers", func() { + f := false + ctrl.startWatches = []watchDescription{{ + src: source.NewKindWithCache(&corev1.Pod{}, &informertest.FakeInformers{Synced: &f}), + }} + ctrl.Name = "foo" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := ctrl.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to wait for foo caches to sync")) + }) + + It("should error when cache sync timeout occurs", func() { + ctrl.CacheSyncTimeout = 10 * time.Nanosecond + + c, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + c = &cacheWithIndefinitelyBlockingGetInformer{c} + + ctrl.startWatches = []watchDescription{{ + src: source.NewKindWithCache(&appsv1.Deployment{}, c), + }} + ctrl.Name = "testcontroller" + + err = ctrl.Start(context.TODO()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to wait for testcontroller caches to sync: timed out waiting for cache to be synced")) + }) + + It("should not error when context cancelled", func() { + ctrl.CacheSyncTimeout = 1 * time.Second + + sourceSynced := make(chan struct{}) + c, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + c = &cacheWithIndefinitelyBlockingGetInformer{c} + ctrl.startWatches = []watchDescription{{ + src: &singnallingSourceWrapper{ + SyncingSource: source.NewKindWithCache(&appsv1.Deployment{}, c), + cacheSyncDone: sourceSynced, + }, + }} + ctrl.Name = "testcontroller" + + ctx, cancel := context.WithCancel(context.TODO()) + go func() { + defer GinkgoRecover() + err = ctrl.Start(ctx) + Expect(err).To(Succeed()) + }() + + cancel() + <-sourceSynced + }) + + It("should not error when cache sync timeout is of sufficiently high", func() { + ctrl.CacheSyncTimeout = 1 * time.Second + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sourceSynced := make(chan struct{}) + c, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + ctrl.startWatches = []watchDescription{{ + src: &singnallingSourceWrapper{ + SyncingSource: source.NewKindWithCache(&appsv1.Deployment{}, c), + cacheSyncDone: sourceSynced, + }, + }} + + go func() { + defer GinkgoRecover() + Expect(c.Start(ctx)).To(Succeed()) + }() + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).To(Succeed()) + }() + + <-sourceSynced + }, 10.0) + + It("should process events from source.Channel", func() { + // channel to be closed when event is processed + processed := make(chan struct{}) + // source channel to be injected + ch := make(chan event.GenericEvent, 1) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + // event to be sent to the channel + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, + } + evt := event.GenericEvent{ + Object: p, + } + + ins := &source.Channel{Source: ch} + ins.DestBufferSize = 1 + Expect(inject.StopChannelInto(ctx.Done(), ins)).To(BeTrue()) + + // send the event to the channel + ch <- evt + + ctrl.startWatches = []watchDescription{{ + src: ins, + handler: handler.Funcs{ + GenericFunc: func(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + close(processed) + }, + }, + }} + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).To(Succeed()) + }() + <-processed + }) + + It("should error when channel is 
passed as a source but stop channel is not injected", func() { + ch := make(chan event.GenericEvent) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + ins := &source.Channel{Source: ch} + ctrl.startWatches = []watchDescription{{ + src: ins, + }} + + e := ctrl.Start(ctx) + + Expect(e).NotTo(BeNil()) + Expect(e.Error()).To(ContainSubstring("must call InjectStop on Channel before calling Start")) + }) + + It("should error when channel source is not specified", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ins := &source.Channel{} + Expect(inject.StopChannelInto(make(<-chan struct{}), ins)).To(BeTrue()) + + ctrl.startWatches = []watchDescription{{ + src: &source.Channel{}, + }} + + e := ctrl.Start(ctx) + Expect(e).NotTo(BeNil()) + Expect(e.Error()).To(ContainSubstring("must specify Channel.Source")) + }) + + It("should call Start on sources with the appropriate EventHandler, Queue, and Predicates", func() { + pr1 := &predicate.Funcs{} + pr2 := &predicate.Funcs{} + evthdl := &handler.EnqueueRequestForObject{} + started := false + src := source.Func(func(ctx context.Context, e handler.EventHandler, q workqueue.RateLimitingInterface, p ...predicate.Predicate) error { + defer GinkgoRecover() + Expect(e).To(Equal(evthdl)) + Expect(q).To(Equal(ctrl.Queue)) + Expect(p).To(ConsistOf(pr1, pr2)) + + started = true + return nil + }) + Expect(ctrl.Watch(src, evthdl, pr1, pr2)).NotTo(HaveOccurred()) + + // Use a cancelled context so Start doesn't block + ctx, cancel := context.WithCancel(context.Background()) + cancel() + Expect(ctrl.Start(ctx)).To(Succeed()) + Expect(started).To(BeTrue()) + }) + + It("should return an error if there is an error starting sources", func() { + err := fmt.Errorf("Expected Error: could not start source") + src := source.Func(func(context.Context, handler.EventHandler, + workqueue.RateLimitingInterface, + ...predicate.Predicate) error { + defer GinkgoRecover() + return err + }) + Expect(ctrl.Watch(src, &handler.EnqueueRequestForObject{})).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + Expect(ctrl.Start(ctx)).To(Equal(err)) + }) + + It("should return an error if it gets started more than once", func() { + // Use a cancelled context so Start doesn't block + ctx, cancel := context.WithCancel(context.Background()) + cancel() + Expect(ctrl.Start(ctx)).To(BeNil()) + err := ctrl.Start(ctx) + Expect(err).NotTo(BeNil()) + Expect(err.Error()).To(Equal("controller was started more than once. 
This is likely to be caused by being added to a manager multiple times")) + }) + + }) + + Describe("Watch", func() { + It("should inject dependencies into the Source", func() { + src := &source.Kind{Type: &corev1.Pod{}} + Expect(src.InjectCache(informers)).To(Succeed()) + evthdl := &handler.EnqueueRequestForObject{} + found := false + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == src { + found = true + } + return nil + } + Expect(ctrl.Watch(src, evthdl)).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "Source not injected") + }) + + It("should return an error if there is an error injecting into the Source", func() { + src := &source.Kind{Type: &corev1.Pod{}} + Expect(src.InjectCache(informers)).To(Succeed()) + evthdl := &handler.EnqueueRequestForObject{} + expected := fmt.Errorf("expect fail source") + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == src { + return expected + } + return nil + } + Expect(ctrl.Watch(src, evthdl)).To(Equal(expected)) + }) + + It("should inject dependencies into the EventHandler", func() { + src := &source.Kind{Type: &corev1.Pod{}} + Expect(src.InjectCache(informers)).To(Succeed()) + evthdl := &handler.EnqueueRequestForObject{} + found := false + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == evthdl { + found = true + } + return nil + } + Expect(ctrl.Watch(src, evthdl)).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "EventHandler not injected") + }) + + It("should return an error if there is an error injecting into the EventHandler", func() { + src := &source.Kind{Type: &corev1.Pod{}} + evthdl := &handler.EnqueueRequestForObject{} + expected := fmt.Errorf("expect fail eventhandler") + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == evthdl { + return expected + } + return nil + } + Expect(ctrl.Watch(src, evthdl)).To(Equal(expected)) + }) + + PIt("should inject dependencies into the Reconciler", func() { + // TODO(community): Write this + }) + + PIt("should return an error if there is an error injecting into the Reconciler", func() { + // TODO(community): Write this + }) + + It("should inject dependencies into all of the Predicates", func() { + src := &source.Kind{Type: &corev1.Pod{}} + Expect(src.InjectCache(informers)).To(Succeed()) + evthdl := &handler.EnqueueRequestForObject{} + pr1 := &predicate.Funcs{} + pr2 := &predicate.Funcs{} + found1 := false + found2 := false + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == pr1 { + found1 = true + } + if i == pr2 { + found2 = true + } + return nil + } + Expect(ctrl.Watch(src, evthdl, pr1, pr2)).NotTo(HaveOccurred()) + Expect(found1).To(BeTrue(), "First Predicated not injected") + Expect(found2).To(BeTrue(), "Second Predicated not injected") + }) + + It("should return an error if there is an error injecting into any of the Predicates", func() { + src := &source.Kind{Type: &corev1.Pod{}} + Expect(src.InjectCache(informers)).To(Succeed()) + evthdl := &handler.EnqueueRequestForObject{} + pr1 := &predicate.Funcs{} + pr2 := &predicate.Funcs{} + expected := fmt.Errorf("expect fail predicate") + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == pr1 { + return expected + } + return nil + } + Expect(ctrl.Watch(src, evthdl, pr1, pr2)).To(Equal(expected)) + + ctrl.SetFields = func(i interface{}) error { + defer GinkgoRecover() + if i == pr2 { + return expected + } + return nil + } + Expect(ctrl.Watch(src, evthdl, pr1, 
pr2)).To(Equal(expected)) + }) + }) + + Describe("Processing queue items from a Controller", func() { + It("should call Reconciler if an item is enqueued", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + queue.Add(request) + + By("Invoking Reconciler") + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) + + By("Removing the item from the queue") + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) + }) + + It("should continue to process additional queue items after the first", func() { + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + Fail("Reconciler should not have been called") + return reconcile.Result{}, nil + }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + By("adding two bad items to the queue") + queue.Add("foo/bar1") + queue.Add("foo/bar2") + + By("expecting both of them to be skipped") + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) + }) + + PIt("should forget an item if it is not a Request and continue processing items", func() { + // TODO(community): write this test + }) + + It("should requeue a Request if there is an error and continue processing items", func() { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + queue.Add(request) + + By("Invoking Reconciler which will give an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("expected error: reconcile")) + Expect(<-reconciled).To(Equal(request)) + + By("Invoking Reconciler a second time without error") + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) + + By("Removing the item from the queue") + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) + }, 1.0) + + // TODO(directxman12): we should ensure that backoff occurrs with error requeue + + It("should not reset backoff until there's a non-error result", func() { + dq := &DelegatingQueue{RateLimitingInterface: ctrl.MakeQueue()} + ctrl.MakeQueue = func() workqueue.RateLimitingInterface { return dq } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which returns an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("something's wrong")) + Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 1})) + + By("Invoking Reconciler a second time with an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("another thing's wrong")) + Expect(<-reconciled).To(Equal(request)) + + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 2})) + + By("Invoking Reconciler a third time, where it finally does not return an error") + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) + + 
Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 0, AddRateLimited: 2})) + + By("Removing the item from the queue") + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) + }) + + It("should requeue a Request with rate limiting if the Result sets Requeue:true and continue processing items", func() { + dq := &DelegatingQueue{RateLimitingInterface: ctrl.MakeQueue()} + ctrl.MakeQueue = func() workqueue.RateLimitingInterface { return dq } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which will ask for requeue") + fakeReconcile.AddResult(reconcile.Result{Requeue: true}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 1})) + + By("Invoking Reconciler a second time without asking for requeue") + fakeReconcile.AddResult(reconcile.Result{Requeue: false}, nil) + Expect(<-reconciled).To(Equal(request)) + + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 0, AddRateLimited: 1})) + + By("Removing the item from the queue") + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) + }) + + It("should requeue a Request after a duration (but not rate-limitted) if the Result sets RequeueAfter (regardless of Requeue)", func() { + dq := &DelegatingQueue{RateLimitingInterface: ctrl.MakeQueue()} + ctrl.MakeQueue = func() workqueue.RateLimitingInterface { return dq } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which will ask for requeue & requeueafter") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100, Requeue: true}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 0, AddAfter: 1})) + + By("Invoking Reconciler a second time asking for a requeueafter only") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, nil) + Expect(<-reconciled).To(Equal(request)) + + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: -1 /* we don't increment the count in addafter */, AddAfter: 2})) + + By("Removing the item from the queue") + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) + }) + + It("should perform error behavior if error is not nil, regardless of RequeueAfter", func() { + dq := &DelegatingQueue{RateLimitingInterface: ctrl.MakeQueue()} + ctrl.MakeQueue = func() workqueue.RateLimitingInterface { return dq } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which will ask for requeueafter with an error") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, fmt.Errorf("expected error: reconcile")) + Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 1})) + + By("Invoking 
Reconciler a second time asking for requeueafter without errors") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{AddAfter: 1, AddRateLimited: 1})) + + By("Removing the item from the queue") + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) + }) + + PIt("should return if the queue is shutdown", func() { + // TODO(community): write this test + }) + + PIt("should wait for informers to be synced before processing items", func() { + // TODO(community): write this test + }) + + PIt("should create a new go routine for MaxConcurrentReconciles", func() { + // TODO(community): write this test + }) + + Context("prometheus metric reconcile_total", func() { + var reconcileTotal dto.Metric + + BeforeEach(func() { + ctrlmetrics.ReconcileTotal.Reset() + reconcileTotal.Reset() + }) + + It("should get updated on successful reconciliation", func() { + Expect(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "success").Write(&reconcileTotal)).To(Succeed()) + if reconcileTotal.GetCounter().GetValue() != 0.0 { + return fmt.Errorf("metric reconcile total not reset") + } + return nil + }()).Should(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + By("Invoking Reconciler which will succeed") + queue.Add(request) + + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "success").Write(&reconcileTotal)).To(Succeed()) + if actual := reconcileTotal.GetCounter().GetValue(); actual != 1.0 { + return fmt.Errorf("metric reconcile total expected: %v and got: %v", 1.0, actual) + } + return nil + }, 2.0).Should(Succeed()) + }, 2.0) + + It("should get updated on reconcile errors", func() { + Expect(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "error").Write(&reconcileTotal)).To(Succeed()) + if reconcileTotal.GetCounter().GetValue() != 0.0 { + return fmt.Errorf("metric reconcile total not reset") + } + return nil + }()).Should(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + By("Invoking Reconciler which will give an error") + queue.Add(request) + + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("expected error: reconcile")) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "error").Write(&reconcileTotal)).To(Succeed()) + if actual := reconcileTotal.GetCounter().GetValue(); actual != 1.0 { + return fmt.Errorf("metric reconcile total expected: %v and got: %v", 1.0, actual) + } + return nil + }, 2.0).Should(Succeed()) + }, 2.0) + + It("should get updated when reconcile returns with retry enabled", func() { + Expect(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "retry").Write(&reconcileTotal)).To(Succeed()) + if reconcileTotal.GetCounter().GetValue() != 0.0 { + return fmt.Errorf("metric reconcile total not reset") + } + return nil + }()).Should(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() 
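+				// GinkgoRecover is deferred because failed assertions panic; recovering here reports
+				// them as test failures instead of crashing this goroutine.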
+ Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + By("Invoking Reconciler which will return result with Requeue enabled") + queue.Add(request) + + fakeReconcile.AddResult(reconcile.Result{Requeue: true}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "requeue").Write(&reconcileTotal)).To(Succeed()) + if actual := reconcileTotal.GetCounter().GetValue(); actual != 1.0 { + return fmt.Errorf("metric reconcile total expected: %v and got: %v", 1.0, actual) + } + return nil + }, 2.0).Should(Succeed()) + }, 2.0) + + It("should get updated when reconcile returns with retryAfter enabled", func() { + Expect(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "retry_after").Write(&reconcileTotal)).To(Succeed()) + if reconcileTotal.GetCounter().GetValue() != 0.0 { + return fmt.Errorf("metric reconcile total not reset") + } + return nil + }()).Should(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + By("Invoking Reconciler which will return result with requeueAfter enabled") + queue.Add(request) + + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: 5 * time.Hour}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() error { + Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "requeue_after").Write(&reconcileTotal)).To(Succeed()) + if actual := reconcileTotal.GetCounter().GetValue(); actual != 1.0 { + return fmt.Errorf("metric reconcile total expected: %v and got: %v", 1.0, actual) + } + return nil + }, 2.0).Should(Succeed()) + }, 2.0) + }) + + Context("should update prometheus metrics", func() { + It("should requeue a Request if there is an error and continue processing items", func() { + var reconcileErrs dto.Metric + ctrlmetrics.ReconcileErrors.Reset() + Expect(func() error { + Expect(ctrlmetrics.ReconcileErrors.WithLabelValues(ctrl.Name).Write(&reconcileErrs)).To(Succeed()) + if reconcileErrs.GetCounter().GetValue() != 0.0 { + return fmt.Errorf("metric reconcile errors not reset") + } + return nil + }()).Should(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + queue.Add(request) + + By("Invoking Reconciler which will give an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("expected error: reconcile")) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() error { + Expect(ctrlmetrics.ReconcileErrors.WithLabelValues(ctrl.Name).Write(&reconcileErrs)).To(Succeed()) + if reconcileErrs.GetCounter().GetValue() != 1.0 { + return fmt.Errorf("metrics not updated") + } + return nil + }, 2.0).Should(Succeed()) + + By("Invoking Reconciler a second time without error") + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) + + By("Removing the item from the queue") + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) + }, 2.0) + + It("should add a reconcile time to the reconcile time histogram", func() { + var reconcileTime dto.Metric + ctrlmetrics.ReconcileTime.Reset() + + Expect(func() error { + histObserver := ctrlmetrics.ReconcileTime.WithLabelValues(ctrl.Name) + hist := histObserver.(prometheus.Histogram) + Expect(hist.Write(&reconcileTime)).To(Succeed()) + if 
reconcileTime.GetHistogram().GetSampleCount() != uint64(0) { + return fmt.Errorf("metrics not reset") + } + return nil + }()).Should(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + queue.Add(request) + + By("Invoking Reconciler") + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) + + By("Removing the item from the queue") + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) + + Eventually(func() error { + histObserver := ctrlmetrics.ReconcileTime.WithLabelValues(ctrl.Name) + hist := histObserver.(prometheus.Histogram) + Expect(hist.Write(&reconcileTime)).To(Succeed()) + if reconcileTime.GetHistogram().GetSampleCount() == uint64(0) { + return fmt.Errorf("metrics not updated") + } + return nil + }, 2.0).Should(Succeed()) + }, 4.0) + }) + }) +}) + +type DelegatingQueue struct { + workqueue.RateLimitingInterface + mu sync.Mutex + + countAddRateLimited int + countAdd int + countAddAfter int +} + +func (q *DelegatingQueue) AddRateLimited(item interface{}) { + q.mu.Lock() + defer q.mu.Unlock() + + q.countAddRateLimited++ + q.RateLimitingInterface.AddRateLimited(item) +} + +func (q *DelegatingQueue) AddAfter(item interface{}, d time.Duration) { + q.mu.Lock() + defer q.mu.Unlock() + + q.countAddAfter++ + q.RateLimitingInterface.AddAfter(item, d) +} + +func (q *DelegatingQueue) Add(item interface{}) { + q.mu.Lock() + defer q.mu.Unlock() + q.countAdd++ + + q.RateLimitingInterface.Add(item) +} + +func (q *DelegatingQueue) Forget(item interface{}) { + q.mu.Lock() + defer q.mu.Unlock() + q.countAdd-- + + q.RateLimitingInterface.Forget(item) +} + +type countInfo struct { + Trying, AddAfter, AddRateLimited int +} + +func (q *DelegatingQueue) getCounts() countInfo { + q.mu.Lock() + defer q.mu.Unlock() + + return countInfo{ + Trying: q.countAdd, + AddAfter: q.countAddAfter, + AddRateLimited: q.countAddRateLimited, + } +} + +type fakeReconcileResultPair struct { + Result reconcile.Result + Err error +} + +type fakeReconciler struct { + Requests chan reconcile.Request + results chan fakeReconcileResultPair +} + +func (f *fakeReconciler) AddResult(res reconcile.Result, err error) { + f.results <- fakeReconcileResultPair{Result: res, Err: err} +} + +func (f *fakeReconciler) Reconcile(_ context.Context, r reconcile.Request) (reconcile.Result, error) { + res := <-f.results + if f.Requests != nil { + f.Requests <- r + } + return res.Result, res.Err +} + +type singnallingSourceWrapper struct { + cacheSyncDone chan struct{} + source.SyncingSource +} + +func (s *singnallingSourceWrapper) WaitForSync(ctx context.Context) error { + defer func() { + close(s.cacheSyncDone) + }() + return s.SyncingSource.WaitForSync(ctx) +} + +var _ cache.Cache = &cacheWithIndefinitelyBlockingGetInformer{} + +// cacheWithIndefinitelyBlockingGetInformer has a GetInformer implementation that blocks indefinitely or until its +// context is cancelled. +// We need it as a workaround for testenvs lack of support for a secure apiserver, because the insecure port always +// implies the allow all authorizer, so we can not simulate rbac issues with it. They are the usual cause of the real +// caches GetInformer blocking showing this behavior. +// TODO: Remove this once envtest supports a secure apiserver. 
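+//
+// An illustrative sketch of how it behaves (realCache is a placeholder for an existing cache.Cache):
+//
+//	c := &cacheWithIndefinitelyBlockingGetInformer{Cache: realCache}
+//	_, err := c.GetInformer(ctx, &corev1.Pod{}) // blocks until ctx is done, then returns an error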
+type cacheWithIndefinitelyBlockingGetInformer struct { + cache.Cache +} + +func (c *cacheWithIndefinitelyBlockingGetInformer) GetInformer(ctx context.Context, obj client.Object) (cache.Informer, error) { + <-ctx.Done() + return nil, errors.New("GetInformer timed out") +} diff --git a/pkg/internal/controller/metrics/metrics.go b/pkg/internal/controller/metrics/metrics.go new file mode 100644 index 0000000000..baec669277 --- /dev/null +++ b/pkg/internal/controller/metrics/metrics.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // ReconcileTotal is a prometheus counter metrics which holds the total + // number of reconciliations per controller. It has two labels. controller label refers + // to the controller name and result label refers to the reconcile result i.e + // success, error, requeue, requeue_after. + ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_total", + Help: "Total number of reconciliations per controller", + }, []string{"controller", "result"}) + + // ReconcileErrors is a prometheus counter metrics which holds the total + // number of errors from the Reconciler. + ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_errors_total", + Help: "Total number of reconciliation errors per controller", + }, []string{"controller"}) + + // ReconcileTime is a prometheus metric which keeps track of the duration + // of reconciliations. + ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "controller_runtime_reconcile_time_seconds", + Help: "Length of time per reconciliation per controller", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + }, []string{"controller"}) + + // WorkerCount is a prometheus metric which holds the number of + // concurrent reconciles per controller. + WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_max_concurrent_reconciles", + Help: "Maximum number of concurrent reconciles per controller", + }, []string{"controller"}) + + // ActiveWorkers is a prometheus metric which holds the number + // of active workers per controller. + ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_active_workers", + Help: "Number of currently used workers per controller", + }, []string{"controller"}) +) + +func init() { + metrics.Registry.MustRegister( + ReconcileTotal, + ReconcileErrors, + ReconcileTime, + WorkerCount, + ActiveWorkers, + // expose process metrics like CPU, Memory, file descriptor usage etc. 
+ collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + // expose Go runtime metrics like GC stats, memory stats etc. + collectors.NewGoCollector(), + ) +} diff --git a/pkg/internal/flock/doc.go b/pkg/internal/flock/doc.go new file mode 100644 index 0000000000..11e39823ed --- /dev/null +++ b/pkg/internal/flock/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package flock is copied from k8s.io/kubernetes/pkg/util/flock to avoid +// importing k8s.io/kubernetes as a dependency. +// +// Provides file locking functionalities on unix systems. +package flock diff --git a/pkg/internal/flock/errors.go b/pkg/internal/flock/errors.go new file mode 100644 index 0000000000..ee7a434372 --- /dev/null +++ b/pkg/internal/flock/errors.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import "errors" + +var ( + // ErrAlreadyLocked is returned when the file is already locked. + ErrAlreadyLocked = errors.New("the file is already locked") +) diff --git a/pkg/internal/flock/flock_other.go b/pkg/internal/flock/flock_other.go new file mode 100644 index 0000000000..069a5b3a2c --- /dev/null +++ b/pkg/internal/flock/flock_other.go @@ -0,0 +1,24 @@ +// +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +// Acquire is not implemented on non-unix systems. +func Acquire(path string) error { + return nil +} diff --git a/pkg/internal/flock/flock_unix.go b/pkg/internal/flock/flock_unix.go new file mode 100644 index 0000000000..71ec576df2 --- /dev/null +++ b/pkg/internal/flock/flock_unix.go @@ -0,0 +1,48 @@ +//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly +// +build linux darwin freebsd openbsd netbsd dragonfly + +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import ( + "errors" + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// Acquire acquires a lock on a file for the duration of the process. This method +// is reentrant. +func Acquire(path string) error { + fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600) + if err != nil { + if errors.Is(err, os.ErrExist) { + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err + } + + // We don't need to close the fd since we should hold + // it until the process exits. + err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX) + if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB. + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err +} diff --git a/pkg/internal/httpserver/server.go b/pkg/internal/httpserver/server.go new file mode 100644 index 0000000000..b5f91f18e0 --- /dev/null +++ b/pkg/internal/httpserver/server.go @@ -0,0 +1,16 @@ +package httpserver + +import ( + "net/http" + "time" +) + +// New returns a new server with sane defaults. +func New(handler http.Handler) *http.Server { + return &http.Server{ + Handler: handler, + MaxHeaderBytes: 1 << 20, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, + } +} diff --git a/pkg/internal/log/log.go b/pkg/internal/log/log.go new file mode 100644 index 0000000000..d91a0ca50c --- /dev/null +++ b/pkg/internal/log/log.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + // RuntimeLog is a base parent logger for use inside controller-runtime. + RuntimeLog logr.Logger +) + +func init() { + RuntimeLog = log.Log.WithName("controller-runtime") +} diff --git a/pkg/internal/objectutil/objectutil.go b/pkg/internal/objectutil/objectutil.go new file mode 100644 index 0000000000..7057f3dbe4 --- /dev/null +++ b/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + "errors" + "fmt" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. +func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} + +// IsAPINamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsAPINamespacedWithGVK(gvk, scheme, restmapper) +} + +// IsAPINamespacedWithGVK returns true if the object having the provided +// GVK is namespace scoped. +func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != apimeta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} diff --git a/pkg/internal/recorder/recorder.go b/pkg/internal/recorder/recorder.go new file mode 100644 index 0000000000..9d8b2f0740 --- /dev/null +++ b/pkg/internal/recorder/recorder.go @@ -0,0 +1,176 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recorder + +import ( + "context" + "fmt" + "sync" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" +) + +// EventBroadcasterProducer makes an event broadcaster, returning +// whether or not the broadcaster should be stopped with the Provider, +// or not (e.g. if it's shared, it shouldn't be stopped with the Provider). 
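+//
+// For example, a producer that hands the Provider a fresh broadcaster it should own and shut
+// down (mirroring what the tests below construct) could look like:
+//
+//	func() (record.EventBroadcaster, bool) { return record.NewBroadcaster(), true }
+//
+// whereas a producer returning a shared broadcaster would return false so the Provider leaves
+// it running.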
+type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool) + +// Provider is a recorder.Provider that records events to the k8s API server +// and to a logr Logger. +type Provider struct { + lock sync.RWMutex + stopped bool + + // scheme to specify when creating a recorder + scheme *runtime.Scheme + // logger is the logger to use when logging diagnostic event info + logger logr.Logger + evtClient corev1client.EventInterface + makeBroadcaster EventBroadcasterProducer + + broadcasterOnce sync.Once + broadcaster record.EventBroadcaster + stopBroadcaster bool +} + +// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to +// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election +// code finishes up and tries to continue emitting events. + +// Stop attempts to stop this provider, stopping the underlying broadcaster +// if the broadcaster asked to be stopped. It kinda tries to honor the given +// context, but the underlying broadcaster has an indefinite wait that doesn't +// return until all queued events are flushed, so this may end up just returning +// before the underlying wait has finished instead of cancelling the wait. +// This is Very Frustrating™. +func (p *Provider) Stop(shutdownCtx context.Context) { + doneCh := make(chan struct{}) + + go func() { + // technically, this could start the broadcaster, but practically, it's + // almost certainly already been started (e.g. by leader election). We + // need to invoke this to ensure that we don't inadvertently race with + // an invocation of getBroadcaster. + broadcaster := p.getBroadcaster() + if p.stopBroadcaster { + p.lock.Lock() + broadcaster.Shutdown() + p.stopped = true + p.lock.Unlock() + } + close(doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-doneCh: + } +} + +// getBroadcaster ensures that a broadcaster is started for this +// provider, and returns it. It's threadsafe. +func (p *Provider) getBroadcaster() record.EventBroadcaster { + // NB(directxman12): this can technically still leak if something calls + // "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we + // create the broadcaster in start, we could race with other things that + // are started at the same time & want to emit events. The alternative is + // silently swallowing events and more locking, but that seems suboptimal. + + p.broadcasterOnce.Do(func() { + broadcaster, stop := p.makeBroadcaster() + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: p.evtClient}) + broadcaster.StartEventWatcher( + func(e *corev1.Event) { + p.logger.V(1).Info(e.Message, "type", e.Type, "object", e.InvolvedObject, "reason", e.Reason) + }) + p.broadcaster = broadcaster + p.stopBroadcaster = stop + }) + + return p.broadcaster +} + +// NewProvider create a new Provider instance. +func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to init client: %w", err) + } + + p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: corev1Client.Events("")} + return p, nil +} + +// GetEventRecorderFor returns an event recorder that broadcasts to this provider's +// broadcaster. All events will be associated with a component of the given name. 
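+//
+// A minimal usage sketch (names are illustrative):
+//
+//	recorder := provider.GetEventRecorderFor("foo-controller")
+//	recorder.Event(obj, corev1.EventTypeNormal, "SomeReason", "some message")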
+func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder { + return &lazyRecorder{ + prov: p, + name: name, + } +} + +// lazyRecorder is a recorder that doesn't actually instantiate any underlying +// recorder until the first event is emitted. +type lazyRecorder struct { + prov *Provider + name string + + recOnce sync.Once + rec record.EventRecorder +} + +// ensureRecording ensures that a concrete recorder is populated for this recorder. +func (l *lazyRecorder) ensureRecording() { + l.recOnce.Do(func() { + broadcaster := l.prov.getBroadcaster() + l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name}) + }) +} + +func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Event(object, eventtype, reason, message) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Eventf(object, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} diff --git a/pkg/internal/recorder/recorder_integration_test.go b/pkg/internal/recorder/recorder_integration_test.go new file mode 100644 index 0000000000..5bafaabf5a --- /dev/null +++ b/pkg/internal/recorder/recorder_integration_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recorder_test + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/scheme" + ref "k8s.io/client-go/tools/reference" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("recorder", func() { + Describe("recorder", func() { + It("should publish events", func() { + By("Creating the Manager") + cm, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the Controller") + recorder := cm.GetEventRecorderFor("test-recorder") + instance, err := controller.New("foo-controller", cm, controller.Options{ + Reconciler: reconcile.Func( + func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + dp, err := clientset.AppsV1().Deployments(request.Namespace).Get(ctx, request.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + recorder.Event(dp, corev1.EventTypeNormal, "test-reason", "test-msg") + return reconcile.Result{}, nil + }), + }) + Expect(err).NotTo(HaveOccurred()) + + By("Watching Resources") + err = instance.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}) + Expect(err).NotTo(HaveOccurred()) + + By("Starting the Manager") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(cm.Start(ctx)).NotTo(HaveOccurred()) + }() + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-name"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + + By("Invoking Reconciling") + deployment, err = clientset.AppsV1().Deployments("default").Create(ctx, deployment, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Validate event is published as expected") + evtWatcher, err := clientset.CoreV1().Events("default").Watch(ctx, metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + + resultEvent := <-evtWatcher.ResultChan() + + Expect(resultEvent.Type).To(Equal(watch.Added)) + evt, isEvent := resultEvent.Object.(*corev1.Event) + Expect(isEvent).To(BeTrue()) + + dpRef, err := ref.GetReference(scheme.Scheme, deployment) + Expect(err).NotTo(HaveOccurred()) + + Expect(evt.InvolvedObject).To(Equal(*dpRef)) + Expect(evt.Type).To(Equal(corev1.EventTypeNormal)) + Expect(evt.Reason).To(Equal("test-reason")) + Expect(evt.Message).To(Equal("test-msg")) + }) + }) +}) diff --git a/pkg/internal/recorder/recorder_suite_test.go b/pkg/internal/recorder/recorder_suite_test.go new file mode 100644 index 0000000000..ee8f98fae0 --- /dev/null +++ b/pkg/internal/recorder/recorder_suite_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recorder_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestRecorder(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Recorder Integration Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/internal/recorder/recorder_test.go b/pkg/internal/recorder/recorder_test.go new file mode 100644 index 0000000000..86bcdd36f6 --- /dev/null +++ b/pkg/internal/recorder/recorder_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recorder_test + +import ( + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +var _ = Describe("recorder.Provider", func() { + makeBroadcaster := func() (record.EventBroadcaster, bool) { return record.NewBroadcaster(), true } + Describe("NewProvider", func() { + It("should return a provider instance and a nil error.", func() { + provider, err := recorder.NewProvider(cfg, scheme.Scheme, logr.Discard(), makeBroadcaster) + Expect(provider).NotTo(BeNil()) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return an error if failed to init client.", func() { + // Invalid the config + cfg1 := *cfg + cfg1.Host = "invalid host" + _, err := recorder.NewProvider(&cfg1, scheme.Scheme, logr.Discard(), makeBroadcaster) + Expect(err).NotTo(BeNil()) + Expect(err.Error()).To(ContainSubstring("failed to init client")) + }) + }) + Describe("GetEventRecorder", func() { + It("should return a recorder instance.", func() { + provider, err := recorder.NewProvider(cfg, scheme.Scheme, logr.Discard(), makeBroadcaster) + Expect(err).NotTo(HaveOccurred()) + + recorder := provider.GetEventRecorderFor("test") + Expect(recorder).NotTo(BeNil()) + }) + }) +}) diff --git a/pkg/internal/testing/OWNERS b/pkg/internal/testing/OWNERS new file mode 100644 index 0000000000..25fda2ebac --- /dev/null +++ b/pkg/internal/testing/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md + +approvers: + - testing-integration-approvers diff --git a/pkg/internal/testing/addr/addr_suite_test.go b/pkg/internal/testing/addr/addr_suite_test.go new file mode 100644 index 0000000000..b18c62def9 --- /dev/null +++ b/pkg/internal/testing/addr/addr_suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestAddr(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + suiteName := "Addr Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/internal/testing/addr/manager.go b/pkg/internal/testing/addr/manager.go new file mode 100644 index 0000000000..ffa33a8861 --- /dev/null +++ b/pkg/internal/testing/addr/manager.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr + +import ( + "errors" + "fmt" + "io/fs" + "net" + "os" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/flock" +) + +// TODO(directxman12): interface / release functionality for external port managers + +const ( + portReserveTime = 2 * time.Minute + portConflictRetry = 100 + portFilePrefix = "port-" +) + +var ( + cacheDir string +) + +func init() { + baseDir, err := os.UserCacheDir() + if err == nil { + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + // Either we didn't get a cache directory, or we can't use it + baseDir = os.TempDir() + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + panic(err) + } +} + +type portCache struct{} + +func (c *portCache) add(port int) (bool, error) { + // Remove outdated ports. + if err := fs.WalkDir(os.DirFS(cacheDir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() || !d.Type().IsRegular() || !strings.HasPrefix(path, portFilePrefix) { + return nil + } + info, err := d.Info() + if err != nil { + // No-op if file no longer exists; may have been deleted by another + // process/thread trying to allocate ports. + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + if time.Since(info.ModTime()) > portReserveTime { + if err := os.Remove(filepath.Join(cacheDir, path)); err != nil { + // No-op if file no longer exists; may have been deleted by another + // process/thread trying to allocate ports. + if os.IsNotExist(err) { + return nil + } + return err + } + } + return nil + }); err != nil { + return false, err + } + // Try allocating new port, by acquiring a file. + path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port) + if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +var cache = &portCache{} + +func suggest(listenHost string) (*net.TCPListener, int, string, error) { + if listenHost == "" { + listenHost = "localhost" + } + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0")) + if err != nil { + return nil, -1, "", err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return nil, -1, "", err + } + return l, l.Addr().(*net.TCPAddr).Port, + addr.IP.String(), + nil +} + +// Suggest suggests an address a process can listen on. It returns +// a tuple consisting of a free port and the hostname resolved to its IP. +// It makes sure that new port allocated does not conflict with old ports +// allocated within 1 minute. +func Suggest(listenHost string) (int, string, error) { + for i := 0; i < portConflictRetry; i++ { + listener, port, resolvedHost, err := suggest(listenHost) + if err != nil { + return -1, "", err + } + defer listener.Close() + if ok, err := cache.add(port); ok { + return port, resolvedHost, nil + } else if err != nil { + return -1, "", err + } + } + return -1, "", fmt.Errorf("no free ports found after %d retries", portConflictRetry) +} diff --git a/pkg/internal/testing/addr/manager_test.go b/pkg/internal/testing/addr/manager_test.go new file mode 100644 index 0000000000..cf95c36115 --- /dev/null +++ b/pkg/internal/testing/addr/manager_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr_test + +import ( + "net" + "strconv" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" +) + +var _ = Describe("SuggestAddress", func() { + It("returns a free port and an address to bind to", func() { + port, host, err := addr.Suggest("") + + Expect(err).NotTo(HaveOccurred()) + Expect(host).To(Or(Equal("127.0.0.1"), Equal("::1"))) + Expect(port).NotTo(Equal(0)) + + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(port))) + Expect(err).NotTo(HaveOccurred()) + l, err := net.ListenTCP("tcp", addr) + defer func() { + Expect(l.Close()).To(Succeed()) + }() + Expect(err).NotTo(HaveOccurred()) + }) + + It("supports an explicit listenHost", func() { + port, host, err := addr.Suggest("localhost") + + Expect(err).NotTo(HaveOccurred()) + Expect(host).To(Or(Equal("127.0.0.1"), Equal("::1"))) + Expect(port).NotTo(Equal(0)) + + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(port))) + Expect(err).NotTo(HaveOccurred()) + l, err := net.ListenTCP("tcp", addr) + defer func() { + Expect(l.Close()).To(Succeed()) + }() + Expect(err).NotTo(HaveOccurred()) + }) + + It("supports a 0.0.0.0 listenHost", func() { + port, host, err := addr.Suggest("0.0.0.0") + + Expect(err).NotTo(HaveOccurred()) + Expect(host).To(Equal("0.0.0.0")) + Expect(port).NotTo(Equal(0)) + + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(port))) + Expect(err).NotTo(HaveOccurred()) + l, err := net.ListenTCP("tcp", addr) + defer func() { + Expect(l.Close()).To(Succeed()) + }() + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/pkg/internal/testing/certs/certs_suite_test.go b/pkg/internal/testing/certs/certs_suite_test.go new file mode 100644 index 0000000000..5b63fc4f55 --- /dev/null +++ b/pkg/internal/testing/certs/certs_suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestInternal(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + suiteName := "TinyCA (Internal Certs) Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/internal/testing/certs/tinyca.go b/pkg/internal/testing/certs/tinyca.go new file mode 100644 index 0000000000..b4188237e6 --- /dev/null +++ b/pkg/internal/testing/certs/tinyca.go @@ -0,0 +1,224 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +// NB(directxman12): nothing has verified that this has good settings. In fact, +// the setting generated here are probably terrible, but they're fine for integration +// tests. These ABSOLUTELY SHOULD NOT ever be exposed in the public API. They're +// ONLY for use with envtest's ability to configure webhook testing. +// If I didn't otherwise not want to add a dependency on cfssl, I'd just use that. + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "time" + + certutil "k8s.io/client-go/util/cert" +) + +var ( + ellipticCurve = elliptic.P256() + bigOne = big.NewInt(1) +) + +// CertPair is a private key and certificate for use for client auth, as a CA, or serving. +type CertPair struct { + Key crypto.Signer + Cert *x509.Certificate +} + +// CertBytes returns the PEM-encoded version of the certificate for this pair. +func (k CertPair) CertBytes() []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: k.Cert.Raw, + }) +} + +// AsBytes encodes keypair in the appropriate formats for on-disk storage (PEM and +// PKCS8, respectively). +func (k CertPair) AsBytes() (cert []byte, key []byte, err error) { + cert = k.CertBytes() + + rawKeyData, err := x509.MarshalPKCS8PrivateKey(k.Key) + if err != nil { + return nil, nil, fmt.Errorf("unable to encode private key: %w", err) + } + + key = pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: rawKeyData, + }) + + return cert, key, nil +} + +// TinyCA supports signing serving certs and client-certs, +// and can be used as an auth mechanism with envtest. +type TinyCA struct { + CA CertPair + orgName string + + nextSerial *big.Int +} + +// newPrivateKey generates a new private key of a relatively sane size (see +// rsaKeySize). +func newPrivateKey() (crypto.Signer, error) { + return ecdsa.GenerateKey(ellipticCurve, crand.Reader) +} + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! 
+func NewTinyCA() (*TinyCA, error) { + caPrivateKey, err := newPrivateKey() + if err != nil { + return nil, fmt.Errorf("unable to generate private key for CA: %w", err) + } + caCfg := certutil.Config{CommonName: "envtest-environment", Organization: []string{"envtest"}} + caCert, err := certutil.NewSelfSignedCACert(caCfg, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to generate certificate for CA: %w", err) + } + + return &TinyCA{ + CA: CertPair{Key: caPrivateKey, Cert: caCert}, + orgName: "envtest", + nextSerial: big.NewInt(1), + }, nil +} + +func (c *TinyCA) makeCert(cfg certutil.Config) (CertPair, error) { + now := time.Now() + + key, err := newPrivateKey() + if err != nil { + return CertPair{}, fmt.Errorf("unable to create private key: %w", err) + } + + serial := new(big.Int).Set(c.nextSerial) + c.nextSerial.Add(c.nextSerial, bigOne) + + template := x509.Certificate{ + Subject: pkix.Name{CommonName: cfg.CommonName, Organization: cfg.Organization}, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + + // technically not necessary for testing, but let's set anyway just in case. + NotBefore: now.UTC(), + // 1 week -- the default for cfssl, and just long enough for a + // long-term test, but not too long that anyone would try to use this + // seriously. + NotAfter: now.Add(168 * time.Hour).UTC(), + } + + certRaw, err := x509.CreateCertificate(crand.Reader, &template, c.CA.Cert, key.Public(), c.CA.Key) + if err != nil { + return CertPair{}, fmt.Errorf("unable to create certificate: %w", err) + } + + cert, err := x509.ParseCertificate(certRaw) + if err != nil { + return CertPair{}, fmt.Errorf("generated invalid certificate, could not parse: %w", err) + } + + return CertPair{ + Key: key, + Cert: cert, + }, nil +} + +// NewServingCert returns a new CertPair for a serving HTTPS on localhost (or other specified names). +func (c *TinyCA) NewServingCert(names ...string) (CertPair, error) { + if len(names) == 0 { + names = []string{"localhost"} + } + dnsNames, ips, err := resolveNames(names) + if err != nil { + return CertPair{}, err + } + + return c.makeCert(certutil.Config{ + CommonName: "localhost", + Organization: []string{c.orgName}, + AltNames: certutil.AltNames{ + DNSNames: dnsNames, + IPs: ips, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) +} + +// ClientInfo describes some Kubernetes user for the purposes of creating +// client certificates. +type ClientInfo struct { + // Name is the user name (embedded as the cert's CommonName) + Name string + // Groups are the groups to which this user belongs (embedded as the cert's + // Organization) + Groups []string +} + +// NewClientCert produces a new CertPair suitable for use with Kubernetes +// client cert auth with an API server validating based on this CA. +func (c *TinyCA) NewClientCert(user ClientInfo) (CertPair, error) { + return c.makeCert(certutil.Config{ + CommonName: user.Name, + Organization: user.Groups, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) +} + +func resolveNames(names []string) ([]string, []net.IP, error) { + dnsNames := []string{} + ips := []net.IP{} + for _, name := range names { + if name == "" { + continue + } + ip := net.ParseIP(name) + if ip == nil { + dnsNames = append(dnsNames, name) + // Also resolve to IPs. 
+ nameIPs, err := net.LookupHost(name) + if err != nil { + return nil, nil, err + } + for _, nameIP := range nameIPs { + ip = net.ParseIP(nameIP) + if ip != nil { + ips = append(ips, ip) + } + } + } else { + ips = append(ips, ip) + } + } + return dnsNames, ips, nil +} diff --git a/pkg/internal/testing/certs/tinyca_test.go b/pkg/internal/testing/certs/tinyca_test.go new file mode 100644 index 0000000000..e3f2513210 --- /dev/null +++ b/pkg/internal/testing/certs/tinyca_test.go @@ -0,0 +1,254 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs_test + +import ( + "crypto/x509" + "encoding/pem" + "math/big" + "net" + "sort" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +var _ = Describe("TinyCA", func() { + var ca *certs.TinyCA + + BeforeEach(func() { + var err error + ca, err = certs.NewTinyCA() + Expect(err).NotTo(HaveOccurred(), "should be able to initialize the CA") + }) + + Describe("the CA certs themselves", func() { + It("should be retrievable as a cert pair", func() { + Expect(ca.CA.Key).NotTo(BeNil(), "should have a key") + Expect(ca.CA.Cert).NotTo(BeNil(), "should have a cert") + }) + + It("should be usable for signing & verifying", func() { + Expect(ca.CA.Cert.KeyUsage&x509.KeyUsageCertSign).NotTo(Equal(0), "should be usable for cert signing") + Expect(ca.CA.Cert.KeyUsage&x509.KeyUsageDigitalSignature).NotTo(Equal(0), "should be usable for signature verifying") + }) + }) + + It("should produce unique serials among all generated certificates of all types", func() { + By("generating a few cert pairs for both serving and client auth") + firstCerts, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred()) + secondCerts, err := ca.NewClientCert(certs.ClientInfo{Name: "user"}) + Expect(err).NotTo(HaveOccurred()) + thirdCerts, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred()) + + By("checking that they have different serials") + serials := []*big.Int{ + firstCerts.Cert.SerialNumber, + secondCerts.Cert.SerialNumber, + thirdCerts.Cert.SerialNumber, + } + // quick uniqueness check of numbers: sort, then you only have to compare sequential entries + sort.Slice(serials, func(i, j int) bool { + return serials[i].Cmp(serials[j]) == -1 + }) + Expect(serials[1].Cmp(serials[0])).NotTo(Equal(0), "serials shouldn't be equal") + Expect(serials[2].Cmp(serials[1])).NotTo(Equal(0), "serials shouldn't be equal") + }) + + Describe("Generated serving certs", func() { + It("should be valid for short enough to avoid production usage, but long enough for long-running tests", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate the serving certs") + + duration := time.Until(cert.Cert.NotAfter) + Expect(duration).To(BeNumerically("<=", 168*time.Hour), "not-after should be short-ish (<= 1 week)") + Expect(duration).To(BeNumerically(">=", 2*time.Hour), "not-after should be 
enough for long tests (couple of hours)") + }) + + Context("when encoding names", func() { + var cert certs.CertPair + BeforeEach(func() { + By("generating a serving cert with IPv4 & IPv6 addresses, and DNS names") + var err error + // IPs are in the "example & docs" blocks for IPv4 (TEST-NET-1) & IPv6 + cert, err = ca.NewServingCert("192.0.2.1", "localhost", "2001:db8::") + Expect(err).NotTo(HaveOccurred(), "should be able to create the serving certs") + }) + + It("should encode all non-IP names as DNS SANs", func() { + Expect(cert.Cert.DNSNames).To(ConsistOf("localhost")) + }) + + It("should encode all IP names as IP SANs", func() { + // NB(directxman12): this is non-exhaustive because we also + // convert DNS SANs to IPs too (see test below) + Expect(cert.Cert.IPAddresses).To(ContainElements( + // normalize the elements with To16 so we can compare them to the output of + // of ParseIP safely (the alternative is a custom matcher that calls Equal, + // but this is easier) + WithTransform(net.IP.To16, Equal(net.ParseIP("192.0.2.1"))), + WithTransform(net.IP.To16, Equal(net.ParseIP("2001:db8::"))), + )) + }) + + It("should add the corresponding IP address(es) (as IP SANs) for DNS names", func() { + // NB(directxman12): we currently fail if the lookup fails. + // I'm not certain this is the best idea (both the bailing on + // error and the actual idea), so if this causes issues, you + // might want to reconsider. + + localhostAddrs, err := net.LookupHost("localhost") + Expect(err).NotTo(HaveOccurred(), "should be able to find IPs for localhost") + localhostIPs := make([]interface{}, len(localhostAddrs)) + for i, addr := range localhostAddrs { + // normalize the elements with To16 so we can compare them to the output of + // of ParseIP safely (the alternative is a custom matcher that calls Equal, + // but this is easier) + localhostIPs[i] = WithTransform(net.IP.To16, Equal(net.ParseIP(addr))) + } + Expect(cert.Cert.IPAddresses).To(ContainElements(localhostIPs...)) + }) + }) + + It("should assume a name of localhost (DNS SAN) if no names are given", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a serving cert with the default name") + Expect(cert.Cert.DNSNames).To(ConsistOf("localhost"), "the default DNS name should be localhost") + + }) + + It("should be usable for server auth, verifying, and enciphering", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a serving cert") + + Expect(cert.Cert.KeyUsage&x509.KeyUsageKeyEncipherment).NotTo(Equal(0), "should be usable for key enciphering") + Expect(cert.Cert.KeyUsage&x509.KeyUsageDigitalSignature).NotTo(Equal(0), "should be usable for signature verifying") + Expect(cert.Cert.ExtKeyUsage).To(ContainElement(x509.ExtKeyUsageServerAuth), "should be usable for server auth") + + }) + + It("should be signed by the CA", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a serving cert") + Expect(cert.Cert.CheckSignatureFrom(ca.CA.Cert)).To(Succeed()) + }) + }) + + Describe("Generated client certs", func() { + var cert certs.CertPair + BeforeEach(func() { + var err error + cert, err = ca.NewClientCert(certs.ClientInfo{ + Name: "user", + Groups: []string{"group1", "group2"}, + }) + Expect(err).NotTo(HaveOccurred(), "should be able to create a client cert") + }) + + It("should be valid for short enough to avoid production usage, but long enough for long-running tests", func() { + 
duration := time.Until(cert.Cert.NotAfter) + Expect(duration).To(BeNumerically("<=", 168*time.Hour), "not-after should be short-ish (<= 1 week)") + Expect(duration).To(BeNumerically(">=", 2*time.Hour), "not-after should be enough for long tests (couple of hours)") + }) + + It("should be usable for client auth, verifying, and enciphering", func() { + Expect(cert.Cert.KeyUsage&x509.KeyUsageKeyEncipherment).NotTo(Equal(0), "should be usable for key enciphering") + Expect(cert.Cert.KeyUsage&x509.KeyUsageDigitalSignature).NotTo(Equal(0), "should be usable for signature verifying") + Expect(cert.Cert.ExtKeyUsage).To(ContainElement(x509.ExtKeyUsageClientAuth), "should be usable for client auth") + }) + + It("should encode the user name as the common name", func() { + Expect(cert.Cert.Subject.CommonName).To(Equal("user")) + }) + + It("should encode the groups as the organization values", func() { + Expect(cert.Cert.Subject.Organization).To(ConsistOf("group1", "group2")) + }) + + It("should be signed by the CA", func() { + Expect(cert.Cert.CheckSignatureFrom(ca.CA.Cert)).To(Succeed()) + }) + }) +}) + +var _ = Describe("Certificate Pairs", func() { + var pair certs.CertPair + BeforeEach(func() { + ca, err := certs.NewTinyCA() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a cert pair") + + pair = ca.CA + }) + + Context("when serializing just the public key", func() { + It("should serialize into a CERTIFICATE PEM block", func() { + bytes := pair.CertBytes() + Expect(bytes).NotTo(BeEmpty(), "should produce some cert bytes") + + block, rest := pem.Decode(bytes) + Expect(rest).To(BeEmpty(), "shouldn't have any data besides the PEM block") + + Expect(block).To(PointTo(MatchAllFields(Fields{ + "Type": Equal("CERTIFICATE"), + "Headers": BeEmpty(), + "Bytes": Equal(pair.Cert.Raw), + }))) + }) + }) + + Context("when serializing both parts", func() { + var certBytes, keyBytes []byte + BeforeEach(func() { + var err error + certBytes, keyBytes, err = pair.AsBytes() + Expect(err).NotTo(HaveOccurred(), "should be able to serialize the pair") + }) + + It("should serialize the private key in PKCS8 form in a PRIVATE KEY PEM block", func() { + Expect(keyBytes).NotTo(BeEmpty(), "should produce some key bytes") + + By("decoding & checking the PEM block") + block, rest := pem.Decode(keyBytes) + Expect(rest).To(BeEmpty(), "shouldn't have any data besides the PEM block") + + Expect(block.Type).To(Equal("PRIVATE KEY")) + + By("decoding & checking the PKCS8 data") + Expect(x509.ParsePKCS8PrivateKey(block.Bytes)).NotTo(BeNil(), "should be able to parse back the private key") + }) + + It("should serialize the public key into a CERTIFICATE PEM block", func() { + Expect(certBytes).NotTo(BeEmpty(), "should produce some cert bytes") + + block, rest := pem.Decode(certBytes) + Expect(rest).To(BeEmpty(), "shouldn't have any data besides the PEM block") + + Expect(block).To(PointTo(MatchAllFields(Fields{ + "Type": Equal("CERTIFICATE"), + "Headers": BeEmpty(), + "Bytes": Equal(pair.Cert.Raw), + }))) + }) + + }) +}) diff --git a/pkg/internal/testing/controlplane/apiserver.go b/pkg/internal/testing/controlplane/apiserver.go new file mode 100644 index 0000000000..c9a1a232ea --- /dev/null +++ b/pkg/internal/testing/controlplane/apiserver.go @@ -0,0 +1,468 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "time"
+
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+const (
+ // saKeyFile is the name of the service account signing private key file.
+ saKeyFile = "sa-signer.key"
+ // saCertFile is the name of the service account signing public key (cert) file.
+ saCertFile = "sa-signer.crt"
+)
+
+// SecureServing provides/configures how the API server serves on the secure port.
+type SecureServing struct {
+ // ListenAddr contains the host & port to serve on.
+ //
+ // Configurable. If unset, it will be defaulted.
+ process.ListenAddr
+ // CA contains the CA that signed the API server's serving certificates.
+ //
+ // Read-only.
+ CA []byte
+ // Authn can be used to provision users, and override what type of
+ // authentication is used to provision users.
+ //
+ // Configurable. If unset, it will be defaulted.
+ Authn
+}
+
+// APIServer knows how to run a kubernetes apiserver.
+type APIServer struct {
+ // URL is the address the ApiServer should listen on for client
+ // connections.
+ //
+ // If set, this will configure the *insecure* serving details.
+ // If unset, it will contain the insecure port if insecure serving is enabled,
+ // and otherwise will contain the secure port.
+ //
+ // If this is not specified, we default to a random free port on localhost.
+ //
+ // Deprecated: use InsecureServing (for the insecure URL) or SecureServing, ideally.
+ URL *url.URL
+
+ // SecurePort is the additional secure port that the APIServer should listen on.
+ //
+ // If set, this will override SecureServing.Port.
+ //
+ // Deprecated: use SecureServing.
+ SecurePort int
+
+ // SecureServing indicates how the API server will serve on the secure port.
+ //
+ // Some parts are configurable. Will be defaulted if unset.
+ SecureServing
+
+ // InsecureServing indicates how the API server will serve on the insecure port.
+ //
+ // If unset, the insecure port will be disabled. Set to an empty struct to get
+ // default values.
+ //
+ // Deprecated: does not work with Kubernetes versions 1.20 and above. Use secure
+ // serving instead.
+ InsecureServing *process.ListenAddr
+
+ // Path is the path to the apiserver binary.
+ //
+ // If this is left as the empty string, we will attempt to locate a binary,
+ // by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and
+ // the default test assets directory. See the "Binaries" section above (in
+ // doc.go) for details.
+ Path string
+
+ // Args is a list of arguments which will be passed to the APIServer binary.
+ // Before they are passed on, they will be evaluated as go-template strings.
+ // This means you can use fields which are defined and exported on this
+ // APIServer struct (e.g. "--cert-dir={{ .Dir }}").
+ // Those templates will be evaluated after the defaulting of the APIServer's
+ // fields has already happened and just before the binary actually gets
+ // started.
Thus you have access to calculated fields like `URL` and others. + // + // If not specified, the minimal set of arguments to run the APIServer will + // be used. + // + // They will be loaded into the same argument set as Configure. Each flag + // will be Append-ed to the configured arguments just before launch. + // + // Deprecated: use Configure instead. + Args []string + + // CertDir is a path to a directory containing whatever certificates the + // APIServer will need. + // + // If left unspecified, then the Start() method will create a fresh temporary + // directory, and the Stop() method will clean it up. + CertDir string + + // EtcdURL is the URL of the Etcd the APIServer should use. + // + // If this is not specified, the Start() method will return an error. + EtcdURL *url.URL + + // StartTimeout, StopTimeout specify the time the APIServer is allowed to + // take when starting and stoppping before an error is emitted. + // + // If not specified, these default to 20 seconds. + StartTimeout time.Duration + StopTimeout time.Duration + + // Out, Err specify where APIServer should write its StdOut, StdErr to. + // + // If not specified, the output will be discarded. + Out io.Writer + Err io.Writer + + processState *process.State + + // args contains the structured arguments to use for running the API server + // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs() + args *process.Arguments +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch the API server. A set of defaults will +// be applied underneath. +func (s *APIServer) Configure() *process.Arguments { + if s.args == nil { + s.args = process.EmptyArguments() + } + return s.args +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (s *APIServer) Start() error { + if err := s.prepare(); err != nil { + return err + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) prepare() error { + if err := s.setProcessState(); err != nil { + return err + } + return s.Authn.Start() +} + +// configurePorts configures the serving ports for this API server. +// +// Most of this method currently deals with making the deprecated fields +// take precedence over the new fields. +func (s *APIServer) configurePorts() error { + // prefer the old fields to the new fields if a user set one, + // otherwise, default the new fields and populate the old ones. + + // Insecure: URL, InsecureServing + if s.URL != nil { + s.InsecureServing = &process.ListenAddr{ + Address: s.URL.Hostname(), + Port: s.URL.Port(), + } + } else if insec := s.InsecureServing; insec != nil { + if insec.Port == "" || insec.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused insecure port: %w", err) + } + s.InsecureServing.Port = strconv.Itoa(port) + s.InsecureServing.Address = host + } + s.URL = s.InsecureServing.URL("http", "") + } + + // Secure: SecurePort, SecureServing + if s.SecurePort != 0 { + s.SecureServing.Port = strconv.Itoa(s.SecurePort) + // if we don't have an address, try the insecure address, and otherwise + // default to loopback. 
+ if s.SecureServing.Address == "" { + if s.InsecureServing != nil { + s.SecureServing.Address = s.InsecureServing.Address + } else { + s.SecureServing.Address = "127.0.0.1" + } + } + } else if s.SecureServing.Port == "" || s.SecureServing.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused secure port: %w", err) + } + s.SecureServing.Port = strconv.Itoa(port) + s.SecureServing.Address = host + s.SecurePort = port + } + + return nil +} + +func (s *APIServer) setProcessState() error { + if s.EtcdURL == nil { + return fmt.Errorf("expected EtcdURL to be configured") + } + + var err error + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. + s.processState = &process.State{ + Dir: s.CertDir, + Path: s.Path, + StartTimeout: s.StartTimeout, + StopTimeout: s.StopTimeout, + } + if err := s.processState.Init("kube-apiserver"); err != nil { + return err + } + + if err := s.configurePorts(); err != nil { + return err + } + + // the secure port will always be on, so use that + s.processState.HealthCheck.URL = *s.SecureServing.URL("https", "/healthz") + + s.CertDir = s.processState.Dir + s.Path = s.processState.Path + s.StartTimeout = s.processState.StartTimeout + s.StopTimeout = s.processState.StopTimeout + + if err := s.populateAPIServerCerts(); err != nil { + return err + } + + if s.SecureServing.Authn == nil { + authn, err := NewCertAuthn() + if err != nil { + return err + } + s.SecureServing.Authn = authn + } + + if err := s.Authn.Configure(s.CertDir, s.Configure()); err != nil { + return err + } + + // NB(directxman12): insecure port is a mess: + // - 1.19 and below have the `--insecure-port` flag, and require it to be set to zero to + // disable it, otherwise the default will be used and we'll conflict. + // - 1.20 requires the flag to be unset or set to zero, and yells at you if you configure it + // - 1.24 won't have the flag at all... + // + // In an effort to automatically do the right thing during this mess, we do feature discovery + // on the flags, and hope that we've "parsed" them properly. + // + // TODO(directxman12): once we support 1.20 as the min version (might be when 1.24 comes out, + // might be around 1.25 or 1.26), remove this logic and the corresponding line in API server's + // default args. + if err := s.discoverFlags(); err != nil { + return err + } + + s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: s, + Defaults: s.defaultArgs(), + MinimalDefaults: map[string][]string{ + // as per kubernetes-sigs/controller-runtime#641, we need this (we + // probably need other stuff too, but this is the only thing that was + // previously considered a "minimal default") + "service-cluster-ip-range": {"10.0.0.0/24"}, + + // we need *some* authorization mode for health checks on the secure port, + // so default to RBAC unless the user set something else (in which case + // this'll be ignored due to SliceToArguments using AppendNoDefaults). + "authorization-mode": {"RBAC"}, + }, + }) + if err != nil { + return err + } + + return nil +} + +// discoverFlags checks for certain flags that *must* be set in certain +// versions, and *must not* be set in others. 
+func (s *APIServer) discoverFlags() error { + // Present: <1.24, Absent: >= 1.24 + present, err := s.processState.CheckFlag("insecure-port") + if err != nil { + return err + } + + if !present { + s.Configure().Disable("insecure-port") + } + + return nil +} + +func (s *APIServer) defaultArgs() map[string][]string { + args := map[string][]string{ + "service-cluster-ip-range": {"10.0.0.0/24"}, + "allow-privileged": {"true"}, + // we're keeping this disabled because if enabled, default SA is + // missing which would force all tests to create one in normal + // apiserver operation this SA is created by controller, but that is + // not run in integration environment + "disable-admission-plugins": {"ServiceAccount"}, + "cert-dir": {s.CertDir}, + "authorization-mode": {"RBAC"}, + "secure-port": {s.SecureServing.Port}, + // NB(directxman12): previously we didn't set the bind address for the secure + // port. It *shouldn't* make a difference unless people are doing something really + // funky, but if you start to get bug reports look here ;-) + "bind-address": {s.SecureServing.Address}, + + // required on 1.20+, fine to leave on for <1.20 + "service-account-issuer": {s.SecureServing.URL("https", "/").String()}, + "service-account-key-file": {filepath.Join(s.CertDir, saCertFile)}, + "service-account-signing-key-file": {filepath.Join(s.CertDir, saKeyFile)}, + } + if s.EtcdURL != nil { + args["etcd-servers"] = []string{s.EtcdURL.String()} + } + if s.URL != nil { + args["insecure-port"] = []string{s.URL.Port()} + args["insecure-bind-address"] = []string{s.URL.Hostname()} + } else { + // TODO(directxman12): remove this once 1.21 is the lowest version we support + // (this might be a while, but this line'll break as of 1.24, so see the comment + // in Start + args["insecure-port"] = []string{"0"} + } + return args +} + +func (s *APIServer) populateAPIServerCerts() error { + _, statErr := os.Stat(filepath.Join(s.CertDir, "apiserver.crt")) + if !os.IsNotExist(statErr) { + return statErr + } + + ca, err := certs.NewTinyCA() + if err != nil { + return err + } + + servingCerts, err := ca.NewServingCert() + if err != nil { + return err + } + + certData, keyData, err := servingCerts.AsBytes() + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { //nolint:gosec + return err + } + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { //nolint:gosec + return err + } + + s.SecureServing.CA = ca.CA.CertBytes() + + // service account signing files too + saCA, err := certs.NewTinyCA() + if err != nil { + return err + } + + saCert, saKey, err := saCA.CA.AsBytes() + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { //nolint:gosec + return err + } + return os.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0640) //nolint:gosec +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (s *APIServer) Stop() error { + if s.processState != nil { + if s.processState.DirNeedsCleaning { + s.CertDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + if err := s.processState.Stop(); err != nil { + return err + } + } + return s.Authn.Stop() +} + +// APIServerDefaultArgs exposes the default args for the APIServer so that you +// can use those to append your own additional arguments. 
+// +// Note that these arguments don't handle newer API servers well to due the more +// complex feature detection neeeded. It's recommended that you switch to .Configure +// as you upgrade API server versions. +// +// Deprecated: use APIServer.Configure(). +var APIServerDefaultArgs = []string{ + "--advertise-address=127.0.0.1", + "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", + "--cert-dir={{ .CertDir }}", + "--insecure-port={{ if .URL }}{{ .URL.Port }}{{else}}0{{ end }}", + "{{ if .URL }}--insecure-bind-address={{ .URL.Hostname }}{{ end }}", + "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", + // we're keeping this disabled because if enabled, default SA is missing which would force all tests to create one + // in normal apiserver operation this SA is created by controller, but that is not run in integration environment + "--disable-admission-plugins=ServiceAccount", + "--service-cluster-ip-range=10.0.0.0/24", + "--allow-privileged=true", + // NB(directxman12): we also enable RBAC if nothing else was enabled +} + +// PrepareAPIServer is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. This saves time on tests. +// +// NB(directxman12): do not expose this outside of internal -- it's unsafe to +// use, because things like port allocation could race even more than they +// currently do if you later call start! +func PrepareAPIServer(s *APIServer) error { + return s.prepare() +} + +// APIServerArguments is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. It's public to make testing easier. +// +// NB(directxman12): do not expose this outside of internal. +func APIServerArguments(s *APIServer) []string { + return s.processState.Args +} diff --git a/pkg/internal/testing/controlplane/apiserver_test.go b/pkg/internal/testing/controlplane/apiserver_test.go new file mode 100644 index 0000000000..b857220203 --- /dev/null +++ b/pkg/internal/testing/controlplane/apiserver_test.go @@ -0,0 +1,295 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "errors" + "net/url" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + + . 
"sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var _ = Describe("APIServer", func() { + var server *APIServer + BeforeEach(func() { + server = &APIServer{ + EtcdURL: &url.URL{}, + } + }) + JustBeforeEach(func() { + Expect(PrepareAPIServer(server)).To(Succeed()) + }) + Describe("setting up serving hosts & ports", func() { + Context("when URL is set", func() { + BeforeEach(func() { + server.URL = &url.URL{Scheme: "http", Host: "localhost:8675", Path: "/some-path"} + }) + + Context("when insecure serving is also set", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{ + Address: "localhost", + Port: "1234", + } + }) + + It("should override the existing insecure serving", func() { + Expect(server.InsecureServing).To(Equal(&process.ListenAddr{ + Address: "localhost", + Port: "8675", + })) + }) + }) + + It("should set insecure serving off of that", func() { + Expect(server.InsecureServing).To(Equal(&process.ListenAddr{ + Address: "localhost", + Port: "8675", + })) + }) + + It("should keep URL as-is", func() { + Expect(server.URL.String()).To(Equal("http://localhost:8675/some-path")) + }) + }) + + Context("when URL is not set but InsecureServing is set", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{} + }) + + Context("when host and port are set", func() { + BeforeEach(func() { + server.InsecureServing.Address = "localhost" + server.InsecureServing.Port = "8675" + }) + It("should set URL from InsecureServing", func() { + Expect(server.URL.String()).To(Equal("http://localhost:8675")) + }) + + It("should leave InsecureServing as-is if address and port are filled out", func() { + Expect(server.InsecureServing).To(Equal(&process.ListenAddr{ + Address: "localhost", + Port: "8675", + })) + }) + }) + + Context("when address and port are not filled out", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{} + }) + It("should default an insecure port", func() { + Expect(server.InsecureServing.Port).NotTo(BeEmpty()) + }) + It("should set URL from InsecureServing", func() { + Expect(server.URL.String()).To(Equal("http://" + server.InsecureServing.Address + ":" + server.InsecureServing.Port)) + }) + }) + }) + + Context("when neither URL or InsecureServing are set", func() { + It("should not default either of them", func() { + Expect(server.URL).To(BeNil(), "no URL should be set") + Expect(server.InsecureServing).To(BeNil(), "no insecure serving details should be set") + }) + }) + + Context("when SecureServing host & port are set", func() { + BeforeEach(func() { + server.Address = "localhost" + server.Port = "8675" + }) + + It("should leave SecureServing as-is", func() { + Expect(server.SecureServing.Address).To(Equal("localhost")) + Expect(server.SecureServing.Port).To(Equal("8675")) + }) + }) + + Context("when SecureServing is not set", func() { + It("should be defaulted with a random port", func() { + Expect(server.Port).NotTo(Equal(0)) + }) + }) + }) + + It("should default authn if not set", func() { + Expect(server.Authn).NotTo(BeNil()) + }) + + Describe("argument defaulting", func() { + // NB(directxman12): most of the templating vs configure logic is tested + // in arguments/arguments_test.go, so just test secure vs insecure port logic here + + Context("when insecure serving is set, on a binary that supports it", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{ + Address: "localhost", + Port: "8675", 
+ } + server.Path = "./testdata/fake-1.19-apiserver.sh" + }) + It("should set the insecure-port and insecure-bind-address fields from insecureserving", func() { + Expect(APIServerArguments(server)).To(ContainElements( + "--insecure-port=8675", + "--insecure-bind-address=localhost", + )) + }) + }) + + Context("when insecureserving is disabled, on binaries with no insecure-port flag", func() { + BeforeEach(func() { + server.Path = "./testdata/fake-1.20-apiserver.sh" + }) + It("should not try to explicitly disable the insecure port", func() { + Expect(APIServerArguments(server)).NotTo(ContainElement(HavePrefix("--insecure-port"))) + }) + }) + + Context("when insecureserving is disabled, on binaries with an insecure-port flag", func() { + BeforeEach(func() { + server.Path = "./testdata/fake-1.19-apiserver.sh" + }) + It("should explicitly disable the insecure port", func() { + Expect(APIServerArguments(server)).To(ContainElement("--insecure-port=0")) + }) + }) + + Context("when given legacy-style template arguments", func() { + BeforeEach(func() { + server.Args = []string{"--foo=bar", "--baz={{ .Port }}"} + }) + It("should use the passed in args with the minimal required defaults", func() { + Expect(APIServerArguments(server)).To(ConsistOf( + "--foo=bar", + MatchRegexp(`--baz=\d+`), + "--service-cluster-ip-range=10.0.0.0/24", + MatchRegexp("--client-ca-file=.+"), + "--authorization-mode=RBAC", + )) + }) + }) + }) + + Describe("setting up auth", func() { + var auth *fakeAuthn + BeforeEach(func() { + auth = &fakeAuthn{ + setFlag: true, + } + server.Authn = auth + }) + It("should configure with the cert dir", func() { + Expect(auth.workDir).To(Equal(server.CertDir)) + }) + It("should pass its args to be configured", func() { + Expect(server.Configure().Get("configure-called").Get(nil)).To(ConsistOf("true")) + }) + + Context("when configuring auth errors out", func() { + It("should fail to configure", func() { + server := &APIServer{ + EtcdURL: &url.URL{}, + SecureServing: SecureServing{ + Authn: auth, + }, + } + auth.configureErr = errors.New("Oh no") + Expect(PrepareAPIServer(server)).NotTo(Succeed()) + }) + }) + }) + + Describe("managing", func() { + // some of these tests are combined for speed reasons -- starting the apiserver + // takes a while, relatively speaking + + var ( + auth *fakeAuthn + etcd *Etcd + ) + BeforeEach(func() { + etcd = &Etcd{} + Expect(etcd.Start()).To(Succeed()) + server.EtcdURL = etcd.URL + + auth = &fakeAuthn{} + server.Authn = auth + }) + AfterEach(func() { + Expect(etcd.Stop()).To(Succeed()) + }) + + Context("after starting", func() { + BeforeEach(func() { + Expect(server.Start()).To(Succeed()) + }) + + It("should stop successfully, and stop auth", func() { + Expect(server.Stop()).To(Succeed()) + Expect(auth.stopCalled).To(BeTrue()) + }) + }) + + It("should fail to start when auth fails to start", func() { + auth.startErr = errors.New("Oh no") + Expect(server.Start()).NotTo(Succeed()) + }) + + It("should start successfully & start auth", func() { + Expect(server.Start()).To(Succeed()) + defer func() { Expect(server.Stop()).To(Succeed()) }() + Expect(auth.startCalled).To(BeTrue()) + }) + }) +}) + +type fakeAuthn struct { + workDir string + + startCalled bool + stopCalled bool + setFlag bool + + configureErr error + startErr error +} + +func (f *fakeAuthn) Configure(workDir string, args *process.Arguments) error { + f.workDir = workDir + if f.setFlag { + args.Set("configure-called", "true") + } + return f.configureErr +} +func (f *fakeAuthn) Start() error { + 
f.startCalled = true
+ return f.startErr
+}
+func (f *fakeAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) {
+ return nil, nil
+}
+func (f *fakeAuthn) Stop() error {
+ f.stopCalled = true
+ return nil
+}
diff --git a/pkg/internal/testing/controlplane/auth.go b/pkg/internal/testing/controlplane/auth.go
new file mode 100644
index 0000000000..16c86a712c
--- /dev/null
+++ b/pkg/internal/testing/controlplane/auth.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "k8s.io/client-go/rest"
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs"
+ "sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+// User represents a Kubernetes user.
+type User struct {
+ // Name is the user's Name.
+ Name string
+ // Groups are the groups to which the user belongs.
+ Groups []string
+}
+
+// Authn knows how to configure an API server for a particular type of authentication,
+// and provision users under that authentication scheme.
+//
+// The methods must be called in the following order (as presented below in the interface
+// for a mnemonic):
+//
+// 1. Configure
+// 2. Start
+// 3. AddUsers (0+ calls)
+// 4. Stop.
+type Authn interface {
+ // Configure provides the working directory to this authenticator,
+ // and configures the given API server arguments to make use of this authenticator.
+ //
+ // Should be called first.
+ Configure(workDir string, args *process.Arguments) error
+ // Start runs this authenticator. Will be called just before API server start.
+ //
+ // Must be called after Configure.
+ Start() error
+ // AddUser provisions a user, returning a copy of the given base rest.Config
+ // configured to authenticate as that user.
+ //
+ // May only be called while the authenticator is "running".
+ AddUser(user User, baseCfg *rest.Config) (*rest.Config, error)
+ // Stop shuts down this authenticator.
+ Stop() error
+}
+
+// CertAuthn is an authenticator (Authn) that makes use of client certificate authn.
+type CertAuthn struct {
+ // ca is the CA used to sign the client certs
+ ca *certs.TinyCA
+ // certDir is the directory used to write the CA crt file
+ // so that the API server can read it.
+ certDir string
+}
+
+// NewCertAuthn creates a new client-cert-based Authn with a new CA.
+func NewCertAuthn() (*CertAuthn, error) {
+ ca, err := certs.NewTinyCA()
+ if err != nil {
+ return nil, fmt.Errorf("unable to provision client certificate auth CA: %w", err)
+ }
+ return &CertAuthn{
+ ca: ca,
+ }, nil
+}
+
+// AddUser provisions a new user that's authenticated via certificates, with
+// the given username and groups embedded in the certificate as expected by the
+// API server.
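For orientation, here is a minimal sketch of the lifecycle the Authn interface above prescribes (Configure, Start, AddUser, Stop), driven through CertAuthn and its AddUser implementation that follows. It is illustrative only and not part of this patch: the helper name, working directory, and user details are made up, and it assumes this package's imports (rest, process).

```go
// provisionTestUser is an illustrative helper, not part of this patch.
func provisionTestUser(workDir string, base *rest.Config) (*rest.Config, error) {
	authn, err := NewCertAuthn()
	if err != nil {
		return nil, err
	}

	// 1. Configure: wires --client-ca-file into the argument set; in real use
	// this would be the API server's argument set (APIServer.Configure()).
	args := process.EmptyArguments()
	if err := authn.Configure(workDir, args); err != nil {
		return nil, err
	}

	// 2. Start: writes the client-cert CA into workDir so the API server can read it.
	if err := authn.Start(); err != nil {
		return nil, err
	}
	defer func() { _ = authn.Stop() }() // 4. Stop

	// 3. AddUser: mints a client cert and returns a rest.Config that presents it.
	return authn.AddUser(User{Name: "jane", Groups: []string{"system:masters"}}, base)
}
```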
+func (c *CertAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) { + certs, err := c.ca.NewClientCert(certs.ClientInfo{ + Name: user.Name, + Groups: user.Groups, + }) + if err != nil { + return nil, fmt.Errorf("unable to create client certificates for %s: %w", user.Name, err) + } + + crt, key, err := certs.AsBytes() + if err != nil { + return nil, fmt.Errorf("unable to serialize client certificates for %s: %w", user.Name, err) + } + + cfg := rest.CopyConfig(baseCfg) + cfg.CertData = crt + cfg.KeyData = key + + return cfg, nil +} + +// caCrtPath returns the path to the on-disk client-cert CA crt file. +func (c *CertAuthn) caCrtPath() string { + return filepath.Join(c.certDir, "client-cert-auth-ca.crt") +} + +// Configure provides the working directory to this authenticator, +// and configures the given API server arguments to make use of this authenticator. +func (c *CertAuthn) Configure(workDir string, args *process.Arguments) error { + c.certDir = workDir + args.Set("client-ca-file", c.caCrtPath()) + return nil +} + +// Start runs this authenticator. Will be called just before API server start. +// +// Must be called after Configure. +func (c *CertAuthn) Start() error { + if len(c.certDir) == 0 { + return fmt.Errorf("start called before configure") + } + caCrt := c.ca.CA.CertBytes() + if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { //nolint:gosec + return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err) + } + + return nil +} + +// Stop shuts down this authenticator. +func (c *CertAuthn) Stop() error { + // no-op -- our workdir is cleaned up for us automatically + return nil +} diff --git a/pkg/internal/testing/controlplane/auth_test.go b/pkg/internal/testing/controlplane/auth_test.go new file mode 100644 index 0000000000..3acbc3d3c4 --- /dev/null +++ b/pkg/internal/testing/controlplane/auth_test.go @@ -0,0 +1,175 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "crypto/tls" + "crypto/x509" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/rest" + kcert "k8s.io/client-go/util/cert" + + cp "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var _ = Describe("Cert Authentication", func() { + var authn *cp.CertAuthn + BeforeEach(func() { + var err error + authn, err = cp.NewCertAuthn() + Expect(err).NotTo(HaveOccurred(), "should be able to create the cert authn") + }) + Context("when starting", func() { + It("should write the verifying CA to the configured directory", func() { + By("setting up a temp dir") + dir, err := os.MkdirTemp("", "envtest_controlplane_*") + Expect(err).NotTo(HaveOccurred(), "should be able to provision a temp dir") + if dir != "" { + defer os.RemoveAll(dir) + } + + By("configuring to use that dir") + Expect(authn.Configure(dir, process.EmptyArguments())).To(Succeed()) + + By("starting and checking the dir") + Expect(authn.Start()).To(Succeed()) + defer func() { Expect(authn.Stop()).To(Succeed()) }() // not strictly necessary, but future-proof + + _, err = os.Stat(filepath.Join(dir, "client-cert-auth-ca.crt")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should error out if we haven't been configured yet", func() { + // NB(directxman12): no configure here intentionally + Expect(authn.Start()).NotTo(Succeed()) + }) + }) + Context("when configuring", func() { + It("should have set up the API server to use the written file for client cert auth", func() { + args := process.EmptyArguments() + Expect(authn.Configure("/tmp/____doesnotexist", args)).To(Succeed()) + Expect(args.Get("client-ca-file").Get(nil)).To(ConsistOf("/tmp/____doesnotexist/client-cert-auth-ca.crt")) + }) + }) + + Describe("creating users", func() { + user := cp.User{Name: "someuser", Groups: []string{"group1", "group2"}} + + Context("before starting", func() { + It("should yield a REST config that contains certs valid for the to-be-written CA", func() { + cfg, err := authn.AddUser(user, &rest.Config{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + Expect(cfg.CertData).NotTo(BeEmpty()) + Expect(cfg.KeyData).NotTo(BeEmpty()) + + // double-check the cert (assume the key is fine if it's present + // and the cert is also present, cause it's more annoying to verify + // and we have separate tinyca & integration tests. 
+ By("parsing the config's cert & key data") + certs, err := tls.X509KeyPair(cfg.CertData, cfg.KeyData) + Expect(err).NotTo(HaveOccurred(), "config cert/key data should be valid key pair") + cert, err := x509.ParseCertificate(certs.Certificate[0]) // re-parse cause .Leaf isn't saved + Expect(err).NotTo(HaveOccurred()) + + By("starting and loading the CA cert") + dir, err := os.MkdirTemp("", "envtest_controlplane_*") + Expect(err).NotTo(HaveOccurred(), "should be able to provision a temp dir") + if dir != "" { + defer os.RemoveAll(dir) + } + Expect(authn.Configure(dir, process.EmptyArguments())).To(Succeed()) + Expect(authn.Start()).To(Succeed()) + caCerts, err := kcert.CertsFromFile(filepath.Join(dir, "client-cert-auth-ca.crt")) + Expect(err).NotTo(HaveOccurred(), "should be able to read the CA cert file))))") + Expect(cert.CheckSignatureFrom(caCerts[0])).To(Succeed(), "the config's cert should be signed by the written CA") + }) + + It("should copy the configuration from the base CA without modifying it", func() { + By("creating a user and checking the output config") + base := &rest.Config{Burst: 30} + cfg, err := authn.AddUser(user, base) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + Expect(cfg.Burst).To(Equal(30)) + + By("mutating the base and verifying the cfg doesn't change") + base.Burst = 8675 + Expect(cfg.Burst).To(Equal(30)) + }) + }) + + Context("after starting", func() { + var dir string + BeforeEach(func() { + By("setting up a temp dir & starting with it") + var err error + dir, err = os.MkdirTemp("", "envtest_controlplane_*") + Expect(err).NotTo(HaveOccurred(), "should be able to provision a temp dir") + Expect(authn.Configure(dir, process.EmptyArguments())).To(Succeed()) + Expect(authn.Start()).To(Succeed()) + }) + AfterEach(func() { + if dir != "" { + defer os.RemoveAll(dir) + } + }) + + It("should yield a REST config that contains certs valid for the written CA", func() { + cfg, err := authn.AddUser(user, &rest.Config{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + Expect(cfg.CertData).NotTo(BeEmpty()) + Expect(cfg.KeyData).NotTo(BeEmpty()) + + // double-check the cert (assume the key is fine if it's present + // and the cert is also present, cause it's more annoying to verify + // and we have separate tinyca & integration tests. 
+ By("parsing the config's cert & key data") + certs, err := tls.X509KeyPair(cfg.CertData, cfg.KeyData) + Expect(err).NotTo(HaveOccurred(), "config cert/key data should be valid key pair") + cert, err := x509.ParseCertificate(certs.Certificate[0]) // re-parse cause .Leaf isn't saved + Expect(err).NotTo(HaveOccurred()) + + By("loading the CA cert") + caCerts, err := kcert.CertsFromFile(filepath.Join(dir, "client-cert-auth-ca.crt")) + Expect(err).NotTo(HaveOccurred(), "should be able to read the CA cert file))))") + Expect(cert.CheckSignatureFrom(caCerts[0])).To(Succeed(), "the config's cert should be signed by the written CA") + }) + + It("should copy the configuration from the base CA without modifying it", func() { + By("creating a user and checking the output config") + base := &rest.Config{Burst: 30} + cfg, err := authn.AddUser(user, base) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + Expect(cfg.Burst).To(Equal(30)) + + By("mutating the base and verifying the cfg doesn't change") + base.Burst = 8675 + Expect(cfg.Burst).To(Equal(30)) + }) + }) + }) +}) diff --git a/pkg/internal/testing/controlplane/controlplane_suite_test.go b/pkg/internal/testing/controlplane/controlplane_suite_test.go new file mode 100644 index 0000000000..067b0c40ce --- /dev/null +++ b/pkg/internal/testing/controlplane/controlplane_suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestIntegration(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + suiteName := "Control Plane Standup Unit Tests" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/internal/testing/controlplane/etcd.go b/pkg/internal/testing/controlplane/etcd.go new file mode 100644 index 0000000000..c30d213295 --- /dev/null +++ b/pkg/internal/testing/controlplane/etcd.go @@ -0,0 +1,202 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "io" + "net" + "net/url" + "strconv" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +// Etcd knows how to run an etcd server. 
+type Etcd struct {
+ // URL is the address the Etcd should listen on for client connections.
+ //
+ // If this is not specified, we default to a random free port on localhost.
+ URL *url.URL
+
+ // Path is the path to the etcd binary.
+ //
+ // If this is left as the empty string, we will attempt to locate a binary,
+ // by checking for the TEST_ASSET_ETCD environment variable, and the default
+ // test assets directory. See the "Binaries" section above (in doc.go) for
+ // details.
+ Path string
+
+ // Args is a list of arguments which will be passed to the Etcd binary. Before
+ // they are passed on, they will be evaluated as go-template strings. This
+ // means you can use fields which are defined and exported on this Etcd
+ // struct (e.g. "--data-dir={{ .Dir }}").
+ // Those templates will be evaluated after the defaulting of the Etcd's
+ // fields has already happened and just before the binary actually gets
+ // started. Thus you have access to calculated fields like `URL` and others.
+ //
+ // If not specified, the minimal set of arguments to run the Etcd will be
+ // used.
+ //
+ // They will be loaded into the same argument set as Configure. Each flag
+ // will be Append-ed to the configured arguments just before launch.
+ //
+ // Deprecated: use Configure instead.
+ Args []string
+
+ // DataDir is a path to a directory in which etcd can store its state.
+ //
+ // If left unspecified, then the Start() method will create a fresh temporary
+ // directory, and the Stop() method will clean it up.
+ DataDir string
+
+ // StartTimeout, StopTimeout specify the time the Etcd is allowed to
+ // take when starting and stopping before an error is emitted.
+ //
+ // If not specified, these default to 20 seconds.
+ StartTimeout time.Duration
+ StopTimeout time.Duration
+
+ // Out, Err specify where Etcd should write its StdOut, StdErr to.
+ //
+ // If not specified, the output will be discarded.
+ Out io.Writer
+ Err io.Writer
+
+ // processState contains the actual details about this running process
+ processState *process.State
+
+ // args contains the structured arguments to use for running etcd.
+ // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs()
+ args *process.Arguments
+
+ // listenPeerURL is the address the Etcd should listen on for peer connections.
+ // It's automatically generated and a random port is picked during execution.
+ listenPeerURL *url.URL
+}
+
+// Start starts the etcd, waits for it to come up, and returns an error, if one
+// occurred.
+func (e *Etcd) Start() error {
+ if err := e.setProcessState(); err != nil {
+ return err
+ }
+ return e.processState.Start(e.Out, e.Err)
+}
+
+func (e *Etcd) setProcessState() error {
+ e.processState = &process.State{
+ Dir: e.DataDir,
+ Path: e.Path,
+ StartTimeout: e.StartTimeout,
+ StopTimeout: e.StopTimeout,
+ }
+
+ // unconditionally re-set this so we can successfully restart
+ // TODO(directxman12): we supported this in the past, but do we actually
+ // want to support re-using an API server object to restart? The loss
+ // of provisioned users is surprising to say the least.
+ if err := e.processState.Init("etcd"); err != nil {
+ return err
+ }
+
+ // Set the listen url.
+ if e.URL == nil {
+ port, host, err := addr.Suggest("")
+ if err != nil {
+ return err
+ }
+ e.URL = &url.URL{
+ Scheme: "http",
+ Host: net.JoinHostPort(host, strconv.Itoa(port)),
+ }
+ }
+
+ // Set the listen peer URL.
+ { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.listenPeerURL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // can use /health as of etcd 3.3.0 + e.processState.HealthCheck.URL = *e.URL + e.processState.HealthCheck.Path = "/health" + + e.DataDir = e.processState.Dir + e.Path = e.processState.Path + e.StartTimeout = e.processState.StartTimeout + e.StopTimeout = e.processState.StopTimeout + + var err error + e.processState.Args, e.Args, err = process.TemplateAndArguments(e.Args, e.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: e, + Defaults: e.defaultArgs(), + }) + return err +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the DataDir if necessary. +func (e *Etcd) Stop() error { + if e.processState.DirNeedsCleaning { + e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + return e.processState.Stop() +} + +func (e *Etcd) defaultArgs() map[string][]string { + args := map[string][]string{ + "listen-peer-urls": {e.listenPeerURL.String()}, + "data-dir": {e.DataDir}, + } + if e.URL != nil { + args["advertise-client-urls"] = []string{e.URL.String()} + args["listen-client-urls"] = []string{e.URL.String()} + } + + // Add unsafe no fsync, available from etcd 3.5 + if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok { + args["unsafe-no-fsync"] = []string{"true"} + } + return args +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch etcd. A set of defaults will +// be applied underneath. +func (e *Etcd) Configure() *process.Arguments { + if e.args == nil { + e.args = process.EmptyArguments() + } + return e.args +} + +// EtcdDefaultArgs exposes the default args for Etcd so that you +// can use those to append your own additional arguments. +var EtcdDefaultArgs = []string{ + "--listen-peer-urls=http://localhost:0", + "--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--data-dir={{ .DataDir }}", +} diff --git a/pkg/internal/testing/controlplane/etcd_test.go b/pkg/internal/testing/controlplane/etcd_test.go new file mode 100644 index 0000000000..e9a1f7a181 --- /dev/null +++ b/pkg/internal/testing/controlplane/etcd_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" +) + +var _ = Describe("etcd", func() { + // basic coherence test + It("should start and stop successfully", func() { + etcd := &Etcd{} + Expect(etcd.Start()).To(Succeed()) + defer func() { + Expect(etcd.Stop()).To(Succeed()) + }() + Expect(etcd.URL).NotTo(BeNil()) + }) +}) diff --git a/pkg/internal/testing/controlplane/kubectl.go b/pkg/internal/testing/controlplane/kubectl.go new file mode 100644 index 0000000000..a27b7a0ff8 --- /dev/null +++ b/pkg/internal/testing/controlplane/kubectl.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "bytes" + "fmt" + "io" + "net/url" + "os/exec" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + kcapi "k8s.io/client-go/tools/clientcmd/api" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + envtestName = "envtest" +) + +// KubeConfigFromREST reverse-engineers a kubeconfig file from a rest.Config. +// The options are tailored towards the rest.Configs we generate, so they're +// not broadly applicable. +// +// This is not intended to be exposed beyond internal for the above reasons. +func KubeConfigFromREST(cfg *rest.Config) ([]byte, error) { + kubeConfig := kcapi.NewConfig() + protocol := "https" + if !rest.IsConfigTransportTLS(*cfg) { + protocol = "http" + } + + // cfg.Host is a URL, so we need to parse it so we can properly append the API path + baseURL, err := url.Parse(cfg.Host) + if err != nil { + return nil, fmt.Errorf("unable to interpret config's host value as a URL: %w", err) + } + + kubeConfig.Clusters[envtestName] = &kcapi.Cluster{ + // TODO(directxman12): if client-go ever decides to expose defaultServerUrlFor(config), + // we can just use that. Note that this is not the same as the public DefaultServerURL, + // which requires us to pass a bunch of stuff in manually. + Server: (&url.URL{Scheme: protocol, Host: baseURL.Host, Path: cfg.APIPath}).String(), + CertificateAuthorityData: cfg.CAData, + } + kubeConfig.AuthInfos[envtestName] = &kcapi.AuthInfo{ + // try to cover all auth strategies that aren't plugins + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + Token: cfg.BearerToken, + Username: cfg.Username, + Password: cfg.Password, + } + kcCtx := kcapi.NewContext() + kcCtx.Cluster = envtestName + kcCtx.AuthInfo = envtestName + kubeConfig.Contexts[envtestName] = kcCtx + kubeConfig.CurrentContext = envtestName + + contents, err := clientcmd.Write(*kubeConfig) + if err != nil { + return nil, fmt.Errorf("unable to serialize kubeconfig file: %w", err) + } + return contents, nil +} + +// KubeCtl is a wrapper around the kubectl binary. +type KubeCtl struct { + // Path where the kubectl binary can be found. + // + // If this is left empty, we will attempt to locate a binary, by checking for + // the TEST_ASSET_KUBECTL environment variable, and the default test assets + // directory. See the "Binaries" section above (in doc.go) for details. 
+ Path string + + // Opts can be used to configure additional flags which will be used each + // time the wrapped binary is called. + // + // For example, you might want to use this to set the URL of the APIServer to + // connect to. + Opts []string +} + +// Run executes the wrapped binary with some preconfigured options and the +// arguments given to this method. It returns Readers for the stdout and +// stderr. +func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) { + if k.Path == "" { + k.Path = process.BinPathFinder("kubectl", "") + } + + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + allArgs := append(k.Opts, args...) + + cmd := exec.Command(k.Path, allArgs...) + cmd.Stdout = stdoutBuffer + cmd.Stderr = stderrBuffer + + err = cmd.Run() + + return stdoutBuffer, stderrBuffer, err +} diff --git a/pkg/internal/testing/controlplane/kubectl_test.go b/pkg/internal/testing/controlplane/kubectl_test.go new file mode 100644 index 0000000000..c09695eecb --- /dev/null +++ b/pkg/internal/testing/controlplane/kubectl_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "io" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + ccapi "k8s.io/client-go/tools/clientcmd/api" + + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" +) + +var _ = Describe("Kubectl", func() { + It("runs kubectl", func() { + k := &KubeCtl{Path: "bash"} + args := []string{"-c", "echo 'something'"} + stdout, stderr, err := k.Run(args...) + Expect(err).NotTo(HaveOccurred()) + Expect(stdout).To(ContainSubstring("something")) + bytes, err := io.ReadAll(stderr) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).To(BeEmpty()) + }) + + Context("when the command returns a non-zero exit code", func() { + It("returns an error", func() { + k := &KubeCtl{Path: "bash"} + args := []string{ + "-c", "echo 'this is StdErr' >&2; echo 'but this is StdOut' >&1; exit 66", + } + + stdout, stderr, err := k.Run(args...) 
+ + Expect(err).To(MatchError(ContainSubstring("exit status 66"))) + + Expect(stdout).To(ContainSubstring("but this is StdOut")) + Expect(stderr).To(ContainSubstring("this is StdErr")) + }) + }) +}) + +var _ = Describe("KubeConfigFromREST", func() { + var ( + restCfg *rest.Config + rawCfg []byte + cfg *ccapi.Config + ) + + BeforeEach(func() { + restCfg = &rest.Config{ + Host: "https://some-host:8675", + APIPath: "/some-prefix", + TLSClientConfig: rest.TLSClientConfig{ + CertData: []byte("cert"), + KeyData: []byte("key"), + CAData: []byte("ca-cert"), + }, + BearerToken: "some-tok", + Username: "some-user", + Password: "some-password", + } + }) + + JustBeforeEach(func() { + var err error + rawCfg, err = KubeConfigFromREST(restCfg) + Expect(err).NotTo(HaveOccurred(), "should be able to convert & serialize the kubeconfig") + + cfg, err = clientcmd.Load(rawCfg) + Expect(err).NotTo(HaveOccurred(), "should be able to deserialize the generated kubeconfig") + }) + + It("should set up a context, and set it as the current one", func() { + By("checking that the current context exists") + Expect(cfg.CurrentContext).NotTo(BeEmpty(), "should have a current context") + Expect(cfg.Contexts).To(HaveKeyWithValue(cfg.CurrentContext, Not(BeNil())), "the current context should exist as a context") + + By("checking that it points to valid info") + currCtx := cfg.Contexts[cfg.CurrentContext] + Expect(currCtx).To(PointTo(MatchFields(IgnoreExtras, Fields{ + "Cluster": Not(BeEmpty()), + "AuthInfo": Not(BeEmpty()), + }))) + + Expect(cfg.Clusters).To(HaveKeyWithValue(currCtx.Cluster, Not(BeNil())), "should point to a cluster") + Expect(cfg.AuthInfos).To(HaveKeyWithValue(currCtx.AuthInfo, Not(BeNil())), "should point to a user") + }) + + Context("when no TLS is enabled", func() { + BeforeEach(func() { + restCfg.Host = "http://some-host:8675" + restCfg.TLSClientConfig = rest.TLSClientConfig{} + }) + + It("should use http in the server url", func() { + cluster := cfg.Clusters[cfg.Contexts[cfg.CurrentContext].Cluster] + Expect(cluster.Server).To(HavePrefix("http://")) + }) + }) + + It("configure the current context to point to the given REST config's server, with CA data", func() { + cluster := cfg.Clusters[cfg.Contexts[cfg.CurrentContext].Cluster] + Expect(cluster).To(PointTo(MatchFields(IgnoreExtras, Fields{ + "Server": Equal("https://some-host:8675/some-prefix"), + "CertificateAuthorityData": Equal([]byte("ca-cert")), + }))) + }) + + It("should copy all non-plugin auth info over", func() { + user := cfg.AuthInfos[cfg.Contexts[cfg.CurrentContext].AuthInfo] + Expect(user).To(PointTo(MatchFields(IgnoreExtras, Fields{ + "ClientCertificateData": Equal([]byte("cert")), + "ClientKeyData": Equal([]byte("key")), + "Token": Equal("some-tok"), + "Username": Equal("some-user"), + "Password": Equal("some-password"), + }))) + }) +}) diff --git a/pkg/internal/testing/controlplane/plane.go b/pkg/internal/testing/controlplane/plane.go new file mode 100644 index 0000000000..456183a7a3 --- /dev/null +++ b/pkg/internal/testing/controlplane/plane.go @@ -0,0 +1,259 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "net/url" + "os" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! +var NewTinyCA = certs.NewTinyCA + +// ControlPlane is a struct that knows how to start your test control plane. +// +// Right now, that means Etcd and your APIServer. This is likely to increase in +// future. +type ControlPlane struct { + APIServer *APIServer + Etcd *Etcd + + // Kubectl will override the default asset search path for kubectl + KubectlPath string + + // for the deprecated methods (Kubectl, etc) + defaultUserCfg *rest.Config + defaultUserKubectl *KubeCtl +} + +// Start will start your control plane processes. To stop them, call Stop(). +func (f *ControlPlane) Start() (retErr error) { + if f.Etcd == nil { + f.Etcd = &Etcd{} + } + if err := f.Etcd.Start(); err != nil { + return err + } + defer func() { + if retErr != nil { + _ = f.Etcd.Stop() + } + }() + + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + f.APIServer.EtcdURL = f.Etcd.URL + if err := f.APIServer.Start(); err != nil { + return err + } + defer func() { + if retErr != nil { + _ = f.APIServer.Stop() + } + }() + + // provision the default user -- can be removed when the related + // methods are removed. The default user has admin permissions to + // mimic legacy no-authz setups. + user, err := f.AddUser(User{Name: "default", Groups: []string{"system:masters"}}, &rest.Config{}) + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) user: %w", err) + } + kubectl, err := user.Kubectl() + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) kubeconfig: %w", err) + } + f.defaultUserCfg = user.Config() + f.defaultUserKubectl = kubectl + return nil +} + +// Stop will stop your control plane processes, and clean up their data. +func (f *ControlPlane) Stop() error { + var errList []error + + if f.APIServer != nil { + if err := f.APIServer.Stop(); err != nil { + errList = append(errList, err) + } + } + + if f.Etcd != nil { + if err := f.Etcd.Stop(); err != nil { + errList = append(errList, err) + } + } + + return kerrors.NewAggregate(errList) +} + +// APIURL returns the URL you should connect to to talk to your API server. +// +// If insecure serving is configured, this will contain the insecure port. +// Otherwise, it will contain the secure port. +// +// Deprecated: use AddUser instead, or APIServer.{Ins|S}ecureServing.URL if +// you really want just the URL. +func (f *ControlPlane) APIURL() *url.URL { + return f.APIServer.URL +} + +// KubeCtl returns a pre-configured KubeCtl, ready to connect to this +// ControlPlane. +// +// Deprecated: use AddUser & AuthenticatedUser.Kubectl instead. +func (f *ControlPlane) KubeCtl() *KubeCtl { + return f.defaultUserKubectl +} + +// RESTClientConfig returns a pre-configured restconfig, ready to connect to +// this ControlPlane. +// +// Deprecated: use AddUser & AuthenticatedUser.Config instead. +func (f *ControlPlane) RESTClientConfig() (*rest.Config, error) { + return f.defaultUserCfg, nil +} + +// AuthenticatedUser contains access information for an provisioned user, +// including REST config, kubeconfig contents, and access to a KubeCtl instance. 
+// +// It's not "safe" to use the methods on this till after the API server has been +// started (due to certificate initialization and such). The various methods will +// panic if this is done. +type AuthenticatedUser struct { + // cfg is the rest.Config for connecting to the API server. It's lazily initialized. + cfg *rest.Config + // cfgIsComplete indicates the cfg has had late-initialized fields (e.g. + // API server CA data) initialized. + cfgIsComplete bool + + // apiServer is a handle to the APIServer that's used when finalizing cfg + // and producing the kubectl instance. + plane *ControlPlane + + // kubectl is our existing, provisioned kubectl. We don't provision one + // till someone actually asks for it. + kubectl *KubeCtl +} + +// Config returns the REST config that can be used to connect to the API server +// as this user. +// +// Will panic if used before the API server is started. +func (u *AuthenticatedUser) Config() *rest.Config { + // NB(directxman12): we choose to panic here for ergonomics sake, and because there's + // not really much you can do to "handle" this error. This machinery is intended to be + // used in tests anyway, so panicing is not a particularly big deal. + if u.cfgIsComplete { + return u.cfg + } + if len(u.plane.APIServer.SecureServing.CA) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + u.cfg.CAData = u.plane.APIServer.SecureServing.CA + u.cfg.Host = u.plane.APIServer.SecureServing.URL("https", "/").String() + u.cfgIsComplete = true + return u.cfg +} + +// KubeConfig returns a KubeConfig that's roughly equivalent to this user's REST config. +// +// Will panic if used before the API server is started. +func (u AuthenticatedUser) KubeConfig() ([]byte, error) { + // NB(directxman12): we don't return the actual API object to avoid yet another + // piece of kubernetes API in our public API, and also because generally the thing + // you want to do with this is just write it out to a file for external debugging + // purposes, etc. + return KubeConfigFromREST(u.Config()) +} + +// Kubectl returns a KubeCtl instance for talking to the API server as this user. It uses +// a kubeconfig equivalent to that returned by .KubeConfig. +// +// Will panic if used before the API server is started. +func (u *AuthenticatedUser) Kubectl() (*KubeCtl, error) { + if u.kubectl != nil { + return u.kubectl, nil + } + if len(u.plane.APIServer.CertDir) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + // cleaning this up is handled when our tmpDir is deleted + out, err := os.CreateTemp(u.plane.APIServer.CertDir, "*.kubecfg") + if err != nil { + return nil, fmt.Errorf("unable to create file for kubeconfig: %w", err) + } + defer out.Close() + contents, err := KubeConfigFromREST(u.Config()) + if err != nil { + return nil, err + } + if _, err := out.Write(contents); err != nil { + return nil, fmt.Errorf("unable to write kubeconfig to disk at %s: %w", out.Name(), err) + } + k := &KubeCtl{ + Path: u.plane.KubectlPath, + } + k.Opts = append(k.Opts, fmt.Sprintf("--kubeconfig=%s", out.Name())) + u.kubectl = k + return k, nil +} + +// AddUser provisions a new user in the cluster. It uses the APIServer's authentication +// strategy -- see APIServer.SecureServing.Authn. +// +// Unlike AddUser, it's safe to pass a nil rest.Config here if you have no +// particular opinions about the config. 
+// +// The default authentication strategy is not guaranteed to any specific strategy, but it is +// guaranteed to be callable both before and after Start has been called (but, as noted in the +// AuthenticatedUser docs, the given user objects are only valid after Start has been called). +func (f *ControlPlane) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + if f.GetAPIServer().SecureServing.Authn == nil { + return nil, fmt.Errorf("no API server authentication is configured yet. The API server defaults one when Start is called, did you mean to use that?") + } + + if baseConfig == nil { + baseConfig = &rest.Config{} + } + cfg, err := f.GetAPIServer().SecureServing.AddUser(user, baseConfig) + if err != nil { + return nil, err + } + + return &AuthenticatedUser{ + cfg: cfg, + plane: f, + }, nil +} + +// GetAPIServer returns this ControlPlane's APIServer, initializing it if necessary. +func (f *ControlPlane) GetAPIServer() *APIServer { + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + return f.APIServer +} diff --git a/pkg/internal/testing/controlplane/plane_test.go b/pkg/internal/testing/controlplane/plane_test.go new file mode 100644 index 0000000000..714e76e8a4 --- /dev/null +++ b/pkg/internal/testing/controlplane/plane_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "context" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + kauthn "k8s.io/api/authorization/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" +) + +var _ = Describe("Control Plane", func() { + It("should start and stop successfully with a default etcd & apiserver", func() { + plane := &ControlPlane{} + Expect(plane.Start()).To(Succeed()) + Expect(plane.Stop()).To(Succeed()) + }) + It("should use the given etcd & apiserver when starting, if present", func() { + apiServer := &APIServer{} + etcd := &Etcd{} + plane := &ControlPlane{ + APIServer: apiServer, + Etcd: etcd, + } + Expect(plane.Start()).To(Succeed()) + defer func() { Expect(plane.Stop()).To(Succeed()) }() + + Expect(plane.APIServer).To(BeIdenticalTo(apiServer)) + Expect(plane.Etcd).To(BeIdenticalTo(etcd)) + }) + + It("should be able to restart", func() { + // NB(directxman12): currently restarting invalidates all current users + // when using CertAuthn. We need to support restarting as per our previous + // contract, but it's not clear how much else we actually need to handle, or + // whether or not this is a safe operation. 
+ plane := &ControlPlane{} + Expect(plane.Start()).To(Succeed()) + Expect(plane.Stop()).To(Succeed()) + Expect(plane.Start()).To(Succeed()) + Expect(plane.Stop()).To(Succeed()) + }) + + Context("after having started", func() { + var plane *ControlPlane + BeforeEach(func() { + plane = &ControlPlane{} + Expect(plane.Start()).To(Succeed()) + }) + AfterEach(func() { + Expect(plane.Stop()).To(Succeed()) + }) + + It("should provision a working legacy user and legacy kubectl", func() { + By("grabbing the legacy kubectl") + Expect(plane.KubeCtl()).NotTo(BeNil()) + + By("grabbing the legacy REST config and testing it") + cfg, err := plane.RESTClientConfig() + Expect(err).NotTo(HaveOccurred(), "should be able to grab the legacy REST config") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred(), "should be able to create a client") + + sar := &kauthn.SelfSubjectAccessReview{ + Spec: kauthn.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &kauthn.ResourceAttributes{ + Verb: "*", + Group: "*", + Version: "*", + Resource: "*", + }, + }, + } + Expect(cl.Create(context.Background(), sar)).To(Succeed(), "should be able to make a Self-SAR") + Expect(sar.Status.Allowed).To(BeTrue(), "admin user should be able to do everything") + }) + + // TODO(directxman12): more explicit tests for AddUser -- it's tested indirectly via the + // legacy user flow, but we should be explicit + + Describe("adding users", func() { + PIt("should be able to provision new users that have a corresponding REST config and & kubectl", func() { + + }) + + PIt("should produce a default base REST config if none is given to add", func() { + + }) + }) + }) +}) diff --git a/pkg/internal/testing/controlplane/testdata/fake-1.19-apiserver.sh b/pkg/internal/testing/controlplane/testdata/fake-1.19-apiserver.sh new file mode 100755 index 0000000000..8b71661185 --- /dev/null +++ b/pkg/internal/testing/controlplane/testdata/fake-1.19-apiserver.sh @@ -0,0 +1,312 @@ +#!/usr/bin/env sh + +cat </=true|false for a specific API group and version (e.g. apps/v1=true) + api/all=true|false controls all API versions + api/ga=true|false controls all API versions of the form v[0-9]+ + api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+ + api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+ + api/legacy is deprecated, and will be removed in a future version + +Egress selector flags: + + --egress-selector-config-file string File with apiserver egress selector configuration. + +Admission flags: + + --admission-control strings Admission is divided into two phases. In the first phase, only mutating admission plugins run. In the second phase, only validating admission plugins run. The names in the below list may represent a validating plugin, a mutating plugin, or both. The order of plugins in which they are passed to this flag does not matter. 
Comma-delimited list of: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. (DEPRECATED: Use --enable-admission-plugins or --disable-admission-plugins instead. Will be removed in a future version.) + --admission-control-config-file string File with admission control configuration. + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). 
Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + +Metrics flags: + + --show-hidden-metrics-for-version string The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is ., e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. + +Logs flags: + + --logging-format string Sets the log format. Permitted formats: "json", "text". + Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency. + Non-default choices are currently alpha and subject to change without warning. (default "text") + +Misc flags: + + --allow-privileged If true, allow privileged containers. [default=false] + --apiserver-count int The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.) (default 1) + --enable-aggregator-routing Turns on aggregator routing requests to endpoints IP rather than cluster IP. + --endpoint-reconciler-type string Use an endpoint reconciler (master-count, lease, none) (default "lease") + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --kubelet-certificate-authority string Path to a cert file for the certificate authority. + --kubelet-client-certificate string Path to a client cert file for TLS. + --kubelet-client-key string Path to a client key file for TLS. + --kubelet-preferred-address-types strings List of the preferred NodeAddressTypes to use for kubelet connections. (default [Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP]) + --kubelet-timeout duration Timeout for kubelet operations. (default 5s) + --kubernetes-service-node-port int If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP. + --max-connection-bytes-per-sec int If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests. + --proxy-client-cert-file string Client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. 
It is expected that this cert includes a signature from the CA in the --requestheader-client-ca-file flag. That CA is published in the 'extension-apiserver-authentication' configmap in the kube-system namespace. Components receiving calls from kube-aggregator should use that CA to perform their half of the mutual TLS verification. + --proxy-client-key-file string Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. + --service-account-signing-key-file string Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. (Requires the 'TokenRequest' feature gate.) + --service-cluster-ip-range string A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods. + --service-node-port-range portRange A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range. (default 30000-32767) + +Global flags: + + --add-dir-header If true, adds the file directory to the header of the log messages + --alsologtostderr log to standard error as well as files + -h, --help help for kube-apiserver + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --logtostderr log to standard error instead of files (default true) + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when opening log files + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level number for the log level verbosity + --version version[=true] Print version information and quit + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + +EOF diff --git a/pkg/internal/testing/controlplane/testdata/fake-1.20-apiserver.sh b/pkg/internal/testing/controlplane/testdata/fake-1.20-apiserver.sh new file mode 100755 index 0000000000..112346cce6 --- /dev/null +++ b/pkg/internal/testing/controlplane/testdata/fake-1.20-apiserver.sh @@ -0,0 +1,318 @@ +#!/usr/bin/env sh + +cat </=true|false for a specific API group and version (e.g. apps/v1=true) + api/all=true|false controls all API versions + api/ga=true|false controls all API versions of the form v[0-9]+ + api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+ + api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+ + api/legacy is deprecated, and will be removed in a future version + +Egress selector flags: + + --egress-selector-config-file string File with apiserver egress selector configuration. + +Admission flags: + + --admission-control strings Admission is divided into two phases. In the first phase, only mutating admission plugins run. In the second phase, only validating admission plugins run. The names in the below list may represent a validating plugin, a mutating plugin, or both. 
The order of plugins in which they are passed to this flag does not matter. Comma-delimited list of: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. (DEPRECATED: Use --enable-admission-plugins or --disable-admission-plugins instead. Will be removed in a future version.) + --admission-control-config-file string File with admission control configuration. + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). 
Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + +Metrics flags: + + --show-hidden-metrics-for-version string The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is ., e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. + +Logs flags: + + --experimental-logging-sanitization [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). + Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production. + --logging-format string Sets the log format. Permitted formats: "json", "text". + Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency. + Non-default choices are currently alpha and subject to change without warning. (default "text") + +Misc flags: + + --allow-privileged If true, allow privileged containers. [default=false] + --apiserver-count int The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.) (default 1) + --enable-aggregator-routing Turns on aggregator routing requests to endpoints IP rather than cluster IP. + --endpoint-reconciler-type string Use an endpoint reconciler (master-count, lease, none) (default "lease") + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --identity-lease-duration-seconds int The duration of kube-apiserver lease in seconds, must be a positive number. (In use when the APIServerIdentity feature gate is enabled.) (default 3600) + --identity-lease-renew-interval-seconds int The interval of kube-apiserver renewing its lease in seconds, must be a positive number. (In use when the APIServerIdentity feature gate is enabled.) (default 10) + --kubelet-certificate-authority string Path to a cert file for the certificate authority. + --kubelet-client-certificate string Path to a client cert file for TLS. + --kubelet-client-key string Path to a client key file for TLS. + --kubelet-preferred-address-types strings List of the preferred NodeAddressTypes to use for kubelet connections. (default [Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP]) + --kubelet-timeout duration Timeout for kubelet operations. 
(default 5s) + --kubernetes-service-node-port int If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP. + --max-connection-bytes-per-sec int If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests. + --proxy-client-cert-file string Client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. It is expected that this cert includes a signature from the CA in the --requestheader-client-ca-file flag. That CA is published in the 'extension-apiserver-authentication' configmap in the kube-system namespace. Components receiving calls from kube-aggregator should use that CA to perform their half of the mutual TLS verification. + --proxy-client-key-file string Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. + --service-account-signing-key-file string Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. + --service-cluster-ip-range string A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods. + --service-node-port-range portRange A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range. (default 30000-32767) + +Global flags: + + --add-dir-header If true, adds the file directory to the header of the log messages + --alsologtostderr log to standard error as well as files + -h, --help help for kube-apiserver + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --logtostderr log to standard error instead of files (default true) + --one-output If true, only write logs to their native severity level (vs also writing to each lower severity level + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when opening log files + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level number for the log level verbosity + --version version[=true] Print version information and quit + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +EOF diff --git a/pkg/internal/testing/process/arguments.go b/pkg/internal/testing/process/arguments.go new file mode 100644 index 0000000000..391eec1fac --- /dev/null +++ b/pkg/internal/testing/process/arguments.go @@ -0,0 +1,340 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "bytes" + "html/template" + "sort" + "strings" +) + +// RenderTemplates returns an []string to render the templates +// +// Deprecated: will be removed in favor of Arguments. +func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { + var t *template.Template + + for _, arg := range argTemplates { + t, err = template.New(arg).Parse(arg) + if err != nil { + args = nil + return + } + + buf := &bytes.Buffer{} + err = t.Execute(buf, data) + if err != nil { + args = nil + return + } + args = append(args, buf.String()) + } + + return +} + +// SliceToArguments converts a slice of arguments to structured arguments, +// appending each argument that starts with `--` and contains an `=` to the +// argument set (ignoring defaults), returning the rest. +// +// Deprecated: will be removed when RenderTemplates is removed. +func SliceToArguments(sliceArgs []string, args *Arguments) []string { + var rest []string + for i, arg := range sliceArgs { + if arg == "--" { + rest = append(rest, sliceArgs[i:]...) + return rest + } + // skip non-flag arguments, skip arguments w/o equals because we + // can't tell if the next argument should take a value + if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") { + rest = append(rest, arg) + continue + } + + parts := strings.SplitN(arg[2:], "=", 2) + name := parts[0] + val := parts[1] + + args.AppendNoDefaults(name, val) + } + + return rest +} + +// TemplateDefaults specifies defaults to be used for joining structured arguments with templates. +// +// Deprecated: will be removed when RenderTemplates is removed. +type TemplateDefaults struct { + // Data will be used to render the template. + Data interface{} + // Defaults will be used to default structured arguments if no template is passed. + Defaults map[string][]string + // MinimalDefaults will be used to default structured arguments if a template is passed. + // Use this for flags which *must* be present. + MinimalDefaults map[string][]string // for api server service-cluster-ip-range +} + +// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing +// behavior. Namely: +// +// 1. if templ has len > 0, it will be rendered against data +// 2. the rendered template values that look like `--foo=bar` will be split +// and appended to args, the rest will be kept around +// 3. the given args will be rendered as string form. If a template is given, +// no defaults will be used, otherwise defaults will be used +// 4. a result of [args..., rest...] will be returned +// +// It returns the resulting rendered arguments, plus the arguments that were +// not transferred to `args` during rendering. +// +// Deprecated: will be removed when RenderTemplates is removed. 
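+//
+// A rough usage sketch (the flag name, positional argument, and data below are
+// illustrative only, not taken from any real caller):
+//
+//	args := EmptyArguments()
+//	all, rest, err := TemplateAndArguments(
+//		[]string{"--sharpness={{ .Sharpness }}", "some-positional-arg"},
+//		args,
+//		TemplateDefaults{Data: struct{ Sharpness string }{"extra"}},
+//	)
+//	// on success (err == nil):
+//	// all  == []string{"--sharpness=extra", "some-positional-arg"}
+//	// rest == []string{"some-positional-arg"}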
+func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) { + if len(templ) == 0 { // 3 & 4 (no template case) + return args.AsStrings(data.Defaults), nil, nil + } + + // 1: render the template + rendered, err := RenderTemplates(templ, data.Data) + if err != nil { + return nil, nil, err + } + + // 2: filter out structured args and add them to args + rest := SliceToArguments(rendered, args) + + // 3 (template case): render structured args, no defaults (matching the + // legacy case where if Args was specified, no defaults were used) + res := args.AsStrings(data.MinimalDefaults) + + // 4: return the rendered structured args + all non-structured args + return append(res, rest...), rest, nil +} + +// EmptyArguments constructs an empty set of flags with no defaults. +func EmptyArguments() *Arguments { + return &Arguments{ + values: make(map[string]Arg), + } +} + +// Arguments are structured, overridable arguments. +// Each Arguments object contains some set of default arguments, which may +// be appended to, or overridden. +// +// When ready, you can serialize them to pass to exec.Command and friends using +// AsStrings. +// +// All flag-setting methods return the *same* instance of Arguments so that you +// can chain calls. +type Arguments struct { + // values contains the user-set values for the arguments. + // `values[key] = dontPass` means "don't pass this flag" + // `values[key] = passAsName` means "pass this flag without args like --key` + // `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c` + // any values not explicitly set here will be copied from defaults on final rendering. + values map[string]Arg +} + +// Arg is an argument that has one or more values, +// and optionally falls back to default values. +type Arg interface { + // Append adds new values to this argument, returning + // a new instance contain the new value. The intermediate + // argument should generally be assumed to be consumed. + Append(vals ...string) Arg + // Get returns the full set of values, optionally including + // the passed in defaults. If it returns nil, this will be + // skipped. If it returns a non-nil empty slice, it'll be + // assumed that the argument should be passed as name-only. + Get(defaults []string) []string +} + +type userArg []string + +func (a userArg) Append(vals ...string) Arg { + return userArg(append(a, vals...)) //nolint:unconvert +} +func (a userArg) Get(_ []string) []string { + return []string(a) +} + +type defaultedArg []string + +func (a defaultedArg) Append(vals ...string) Arg { + return defaultedArg(append(a, vals...)) //nolint:unconvert +} +func (a defaultedArg) Get(defaults []string) []string { + res := append([]string(nil), defaults...) + return append(res, a...) +} + +type dontPassArg struct{} + +func (a dontPassArg) Append(vals ...string) Arg { + return userArg(vals) +} +func (dontPassArg) Get(_ []string) []string { + return nil +} + +type passAsNameArg struct{} + +func (a passAsNameArg) Append(_ ...string) Arg { + return passAsNameArg{} +} +func (passAsNameArg) Get(_ []string) []string { + return []string{} +} + +var ( + // DontPass indicates that the given argument will not actually be + // rendered. + DontPass Arg = dontPassArg{} + // PassAsName indicates that the given flag will be passed as `--key` + // without any value. 
+ PassAsName Arg = passAsNameArg{} +) + +// AsStrings serializes this set of arguments to a slice of strings appropriate +// for passing to exec.Command and friends, making use of the given defaults +// as indicated for each particular argument. +// +// - Any flag in defaults that's not in Arguments will be present in the output +// - Any flag that's present in Arguments will be passed the corresponding +// defaults to do with as it will (ignore, append-to, suppress, etc). +func (a *Arguments) AsStrings(defaults map[string][]string) []string { + // sort for deterministic ordering + keysInOrder := make([]string, 0, len(defaults)+len(a.values)) + for key := range defaults { + if _, userSet := a.values[key]; userSet { + continue + } + keysInOrder = append(keysInOrder, key) + } + for key := range a.values { + keysInOrder = append(keysInOrder, key) + } + sort.Strings(keysInOrder) + + var res []string + for _, key := range keysInOrder { + vals := a.Get(key).Get(defaults[key]) + switch { + case vals == nil: // don't pass + continue + case len(vals) == 0: // pass as name + res = append(res, "--"+key) + default: + for _, val := range vals { + res = append(res, "--"+key+"="+val) + } + } + } + + return res +} + +// Get returns the value of the given flag. If nil, +// it will not be passed in AsString, otherwise: +// +// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`. +func (a *Arguments) Get(key string) Arg { + if vals, ok := a.values[key]; ok { + return vals + } + return defaultedArg(nil) +} + +// Enable configures the given key to be passed as a "name-only" flag, +// like, `--key`. +func (a *Arguments) Enable(key string) *Arguments { + a.values[key] = PassAsName + return a +} + +// Disable prevents this flag from be passed. +func (a *Arguments) Disable(key string) *Arguments { + a.values[key] = DontPass + return a +} + +// Append adds additional values to this flag. If this flag has +// yet to be set, initial values will include defaults. If you want +// to intentionally ignore defaults/start from scratch, call AppendNoDefaults. +// +// Multiple values will look like `--key=value1 --key=value2 ...`. +func (a *Arguments) Append(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = defaultedArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// AppendNoDefaults adds additional values to this flag. However, +// unlike Append, it will *not* copy values from defaults. +func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = userArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// Set resets the given flag to the specified values, ignoring any existing +// values or defaults. +func (a *Arguments) Set(key string, values ...string) *Arguments { + a.values[key] = userArg(values) + return a +} + +// SetRaw sets the given flag to the given Arg value directly. Use this if +// you need to do some complicated deferred logic or something. +// +// Otherwise behaves like Set. +func (a *Arguments) SetRaw(key string, val Arg) *Arguments { + a.values[key] = val + return a +} + +// FuncArg is a basic implementation of Arg that can be used for custom argument logic, +// like pulling values out of APIServer, or dynamically calculating values just before +// launch. +// +// The given function will be mapped directly to Arg#Get, and will generally be +// used in conjunction with SetRaw. 
For example, to set `--some-flag` to the +// API server's CertDir, you could do: +// +// server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string { +// return []string{server.CertDir} +// })) +// +// FuncArg ignores Appends; if you need to support appending values too, consider implementing +// Arg directly. +type FuncArg func([]string) []string + +// Append is a no-op for FuncArg, and just returns itself. +func (a FuncArg) Append(vals ...string) Arg { return a } + +// Get delegates functionality to the FuncArg function itself. +func (a FuncArg) Get(defaults []string) []string { + return a(defaults) +} diff --git a/pkg/internal/testing/process/arguments_test.go b/pkg/internal/testing/process/arguments_test.go new file mode 100644 index 0000000000..386a3ec52f --- /dev/null +++ b/pkg/internal/testing/process/arguments_test.go @@ -0,0 +1,346 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process_test + +import ( + "net/url" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var _ = Describe("Arguments Templates", func() { + It("templates URLs", func() { + templates := []string{ + "plain URL: {{ .SomeURL }}", + "method on URL: {{ .SomeURL.Hostname }}", + "empty URL: {{ .EmptyURL }}", + "handled empty URL: {{- if .EmptyURL }}{{ .EmptyURL }}{{ end }}", + } + data := struct { + SomeURL *url.URL + EmptyURL *url.URL + }{ + &url.URL{Scheme: "https", Host: "the.host.name:3456"}, + nil, + } + + out, err := RenderTemplates(templates, data) + Expect(err).NotTo(HaveOccurred()) + Expect(out).To(BeEquivalentTo([]string{ + "plain URL: https://the.host.name:3456", + "method on URL: the.host.name", + "empty URL: <nil>", + "handled empty URL:", + })) + }) + + It("templates strings", func() { + templates := []string{ + "a string: {{ .SomeString }}", + "empty string: {{- .EmptyString }}", + } + data := struct { + SomeString string + EmptyString string + }{ + "this is some random string", + "", + } + + out, err := RenderTemplates(templates, data) + Expect(err).NotTo(HaveOccurred()) + Expect(out).To(BeEquivalentTo([]string{ + "a string: this is some random string", + "empty string:", + })) + }) + + It("has no access to unexported fields", func() { + templates := []string{ + "this is just a string", + "this blows up {{ .test }}", + } + data := struct{ test string }{"ooops private"} + + out, err := RenderTemplates(templates, data) + Expect(out).To(BeEmpty()) + Expect(err).To(MatchError( + ContainSubstring("is an unexported field of struct"), + )) + }) + + It("errors when field cannot be found", func() { + templates := []string{"this does {{ .NotExist }}"} + data := struct{ Unused string }{"unused"} + + out, err := RenderTemplates(templates, data) + Expect(out).To(BeEmpty()) + Expect(err).To(MatchError( + ContainSubstring("can't evaluate field"), + )) + }) + + Context("when joining with structured Arguments", func() { + var ( + args *Arguments + templ = 
[]string{ + "--cheese=parmesean", + "-om", + "nom nom nom", + "--sharpness={{ .sharpness }}", + } + data = TemplateDefaults{ + Data: map[string]string{"sharpness": "extra"}, + Defaults: map[string][]string{ + "cracker": {"ritz"}, + "pickle": {"kosher-dill"}, + }, + MinimalDefaults: map[string][]string{ + "pickle": {"kosher-dill"}, + }, + } + ) + BeforeEach(func() { + args = EmptyArguments() + }) + + Context("when a template is given", func() { + It("should use minimal defaults", func() { + all, _, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(all).To(SatisfyAll( + Not(ContainElement("--cracker=ritz")), + ContainElement("--pickle=kosher-dill"), + )) + }) + + It("should render the template against the data", func() { + all, _, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(all).To(ContainElements( + "--sharpness=extra", + )) + }) + + It("should append the rendered template to structured arguments", func() { + args.Append("cheese", "cheddar") + + all, _, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(all).To(Equal([]string{ + "--cheese=cheddar", + "--cheese=parmesean", + "--pickle=kosher-dill", + "--sharpness=extra", + "-om", + "nom nom nom", + })) + }) + + It("should indicate which arguments were not able to be converted to structured flags", func() { + _, rest, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(rest).To(Equal([]string{"-om", "nom nom nom"})) + + }) + }) + + Context("when no template is given", func() { + It("should render the structured arguments with the given defaults", func() { + args. + Append("cheese", "cheddar", "parmesean"). + Append("cracker", "triscuit") + + Expect(TemplateAndArguments(nil, args, data)).To(Equal([]string{ + "--cheese=cheddar", + "--cheese=parmesean", + "--cracker=ritz", + "--cracker=triscuit", + "--pickle=kosher-dill", + })) + }) + }) + }) + + Context("when converting to structured Arguments", func() { + var args *Arguments + BeforeEach(func() { + args = EmptyArguments() + }) + + It("should skip arguments that don't start with `--`", func() { + rest := SliceToArguments([]string{"-first", "second", "--foo=bar"}, args) + Expect(rest).To(Equal([]string{"-first", "second"})) + Expect(args.AsStrings(nil)).To(Equal([]string{"--foo=bar"})) + }) + + It("should skip arguments that don't contain an `=` because they're ambiguous", func() { + rest := SliceToArguments([]string{"--first", "--second", "--foo=bar"}, args) + Expect(rest).To(Equal([]string{"--first", "--second"})) + Expect(args.AsStrings(nil)).To(Equal([]string{"--foo=bar"})) + }) + + It("should stop at the flag terminator (`--`)", func() { + rest := SliceToArguments([]string{"--first", "--second", "--", "--foo=bar"}, args) + Expect(rest).To(Equal([]string{"--first", "--second", "--", "--foo=bar"})) + Expect(args.AsStrings(nil)).To(BeEmpty()) + }) + + It("should split --foo=bar into Append(foo, bar)", func() { + rest := SliceToArguments([]string{"--foo=bar1", "--foo=bar2"}, args) + Expect(rest).To(BeEmpty()) + Expect(args.Get("foo").Get(nil)).To(Equal([]string{"bar1", "bar2"})) + }) + + It("should split --foo=bar=baz into Append(foo, bar=baz)", func() { + rest := SliceToArguments([]string{"--vmodule=file.go=3", "--vmodule=other.go=4"}, args) + Expect(rest).To(BeEmpty()) + Expect(args.Get("vmodule").Get(nil)).To(Equal([]string{"file.go=3", "other.go=4"})) + }) + + It("should append to existing arguments", func() { + 
args.Append("foo", "barA") + rest := SliceToArguments([]string{"--foo=bar1", "--foo=bar2"}, args) + Expect(rest).To(BeEmpty()) + Expect(args.Get("foo").Get([]string{"barI"})).To(Equal([]string{"barI", "barA", "bar1", "bar2"})) + }) + }) +}) + +var _ = Describe("Arguments", func() { + Context("when appending", func() { + It("should copy from defaults when appending for the first time", func() { + args := EmptyArguments(). + Append("some-key", "val3") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1", "val2", "val3"})) + }) + + It("should not copy from defaults if the flag has been disabled previously", func() { + args := EmptyArguments(). + Disable("some-key"). + Append("some-key", "val3") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val3"})) + }) + + It("should only copy defaults the first time", func() { + args := EmptyArguments(). + Append("some-key", "val3", "val4"). + Append("some-key", "val5") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1", "val2", "val3", "val4", "val5"})) + }) + + It("should not copy from defaults if the flag has been previously overridden", func() { + args := EmptyArguments(). + Set("some-key", "vala"). + Append("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala", "valb", "valc"})) + }) + + Context("when explicitly overriding defaults", func() { + It("should not copy from defaults, but should append to previous calls", func() { + args := EmptyArguments(). + AppendNoDefaults("some-key", "vala"). + AppendNoDefaults("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala", "valb", "valc"})) + }) + + It("should not copy from defaults, but should respect previous appends' copies", func() { + args := EmptyArguments(). + Append("some-key", "vala"). + AppendNoDefaults("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1", "val2", "vala", "valb", "valc"})) + }) + + It("should not copy from defaults if the flag has been previously appended to ignoring defaults", func() { + args := EmptyArguments(). + AppendNoDefaults("some-key", "vala"). + Append("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala", "valb", "valc"})) + }) + }) + }) + + It("should ignore defaults when overriding", func() { + args := EmptyArguments(). + Set("some-key", "vala") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala"})) + }) + + It("should allow directly setting the argument value for custom argument types", func() { + args := EmptyArguments(). + SetRaw("custom-key", commaArg{"val3"}). + Append("custom-key", "val4") + Expect(args.Get("custom-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1,val2,val3,val4"})) + }) + + Context("when rendering flags", func() { + It("should not render defaults for disabled flags", func() { + defs := map[string][]string{ + "some-key": {"val1", "val2"}, + "other-key": {"val"}, + } + args := EmptyArguments(). + Disable("some-key") + Expect(args.AsStrings(defs)).To(ConsistOf("--other-key=val")) + }) + + It("should render name-only flags as --key", func() { + args := EmptyArguments(). + Enable("some-key") + Expect(args.AsStrings(nil)).To(ConsistOf("--some-key")) + }) + + It("should render multiple values as --key=val1, --key=val2", func() { + args := EmptyArguments(). 
+ Append("some-key", "val1", "val2"). + Append("other-key", "vala", "valb") + Expect(args.AsStrings(nil)).To(ConsistOf("--other-key=valb", "--other-key=vala", "--some-key=val1", "--some-key=val2")) + }) + + It("should read from defaults if the user hasn't set a value for a flag", func() { + defs := map[string][]string{ + "some-key": {"val1", "val2"}, + } + args := EmptyArguments(). + Append("other-key", "vala", "valb") + Expect(args.AsStrings(defs)).To(ConsistOf("--other-key=valb", "--other-key=vala", "--some-key=val1", "--some-key=val2")) + }) + + It("should not render defaults if the user has set a value for a flag", func() { + defs := map[string][]string{ + "some-key": {"val1", "val2"}, + } + args := EmptyArguments(). + Set("some-key", "vala") + Expect(args.AsStrings(defs)).To(ConsistOf("--some-key=vala")) + }) + }) +}) + +type commaArg []string + +func (a commaArg) Get(defs []string) []string { + // not quite, but close enough + return []string{strings.Join(defs, ",") + "," + strings.Join(a, ",")} +} +func (a commaArg) Append(vals ...string) Arg { + return commaArg(append(a, vals...)) //nolint:unconvert +} diff --git a/pkg/internal/testing/process/bin_path_finder.go b/pkg/internal/testing/process/bin_path_finder.go new file mode 100644 index 0000000000..e1428aa6e5 --- /dev/null +++ b/pkg/internal/testing/process/bin_path_finder.go @@ -0,0 +1,70 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "os" + "path/filepath" + "regexp" + "strings" +) + +const ( + // EnvAssetsPath is the environment variable that stores the global test + // binary location override. + EnvAssetsPath = "KUBEBUILDER_ASSETS" + // EnvAssetOverridePrefix is the environment variable prefix for per-binary + // location overrides. + EnvAssetOverridePrefix = "TEST_ASSET_" + // AssetsDefaultPath is the default location to look for test binaries in, + // if no override was provided. + AssetsDefaultPath = "/usr/local/kubebuilder/bin" +) + +// BinPathFinder finds the path to the given named binary, using the following locations +// in order of precedence (highest first). Notice that the various env vars only need +// to be set -- the asset is not checked for existence on the filesystem. +// +// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix) +// 1. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath) +// 3. assetDirectory (if set; per-config asset directory) +// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath). 
+func BinPathFinder(symbolicName, assetDirectory string) (binPath string) { + punctuationPattern := regexp.MustCompile("[^A-Z0-9]+") + sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_") + leadingNumberPattern := regexp.MustCompile("^[0-9]+") + sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "") + envVar := EnvAssetOverridePrefix + sanitizedName + + // TEST_ASSET_XYZ + if val, ok := os.LookupEnv(envVar); ok { + return val + } + + // KUBEBUILDER_ASSETS + if val, ok := os.LookupEnv(EnvAssetsPath); ok { + return filepath.Join(val, symbolicName) + } + + // assetDirectory + if assetDirectory != "" { + return filepath.Join(assetDirectory, symbolicName) + } + + // default path + return filepath.Join(AssetsDefaultPath, symbolicName) +} diff --git a/pkg/internal/testing/process/bin_path_finder_test.go b/pkg/internal/testing/process/bin_path_finder_test.go new file mode 100644 index 0000000000..c933478811 --- /dev/null +++ b/pkg/internal/testing/process/bin_path_finder_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("BinPathFinder", func() { + var prevAssetPath string + BeforeEach(func() { + prevAssetPath = os.Getenv(EnvAssetsPath) + Expect(os.Unsetenv(EnvAssetsPath)).To(Succeed()) + Expect(os.Unsetenv(EnvAssetOverridePrefix + "_SOME_FAKE")) + Expect(os.Unsetenv(EnvAssetOverridePrefix + "OTHERFAKE")) + }) + AfterEach(func() { + if prevAssetPath != "" { + Expect(os.Setenv(EnvAssetsPath, prevAssetPath)) + } + }) + Context("when individual overrides are present", func() { + BeforeEach(func() { + Expect(os.Setenv(EnvAssetOverridePrefix+"OTHERFAKE", "/other/path")).To(Succeed()) + Expect(os.Setenv(EnvAssetOverridePrefix+"_SOME_FAKE", "/some/path")).To(Succeed()) + // set the global path to make sure we don't prefer it + Expect(os.Setenv(EnvAssetsPath, "/global/path")).To(Succeed()) + }) + + It("should prefer individual overrides, using them unmodified", func() { + Expect(BinPathFinder("otherfake", "/hardcoded/path")).To(Equal("/other/path")) + }) + + It("should convert lowercase to uppercase, remove leading numbers, and replace punctuation with underscores when resolving the env var name", func() { + Expect(BinPathFinder("123.some-fake", "/hardcoded/path")).To(Equal("/some/path")) + }) + }) + + Context("when individual overrides are missing but the global override is present", func() { + BeforeEach(func() { + Expect(os.Setenv(EnvAssetsPath, "/global/path")).To(Succeed()) + }) + It("should prefer the global override, appending the name to that path", func() { + Expect(BinPathFinder("some-fake", "/hardcoded/path")).To(Equal("/global/path/some-fake")) + }) + }) + + Context("when an asset directory is given and no overrides are present", func() { + It("should use the asset directory, appending the name to that path", func() { + Expect(BinPathFinder("some-fake", 
"/hardcoded/path")).To(Equal("/hardcoded/path/some-fake")) + }) + }) + + Context("when no path configuration is given", func() { + It("should just use the default path", func() { + Expect(BinPathFinder("some-fake", "")).To(Equal("/usr/local/kubebuilder/bin/some-fake")) + }) + }) +}) diff --git a/pkg/internal/testing/process/process.go b/pkg/internal/testing/process/process.go new file mode 100644 index 0000000000..af83c70a2f --- /dev/null +++ b/pkg/internal/testing/process/process.go @@ -0,0 +1,272 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "regexp" + "sync" + "syscall" + "time" +) + +// ListenAddr represents some listening address and port. +type ListenAddr struct { + Address string + Port string +} + +// URL returns a URL for this address with the given scheme and subpath. +func (l *ListenAddr) URL(scheme string, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: l.HostPort(), + Path: path, + } +} + +// HostPort returns the joined host-port pair for this address. +func (l *ListenAddr) HostPort() string { + return net.JoinHostPort(l.Address, l.Port) +} + +// HealthCheck describes the information needed to health-check a process via +// some health-check URL. +type HealthCheck struct { + url.URL + + // HealthCheckPollInterval is the interval which will be used for polling the + // endpoint described by Host, Port, and Path. + // + // If left empty it will default to 100 Milliseconds. + PollInterval time.Duration +} + +// State define the state of the process. +type State struct { + Cmd *exec.Cmd + + // HealthCheck describes how to check if this process is up. If we get an http.StatusOK, + // we assume the process is ready to operate. + // + // For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd. + HealthCheck HealthCheck + + Args []string + + StopTimeout time.Duration + StartTimeout time.Duration + + Dir string + DirNeedsCleaning bool + Path string + + // ready holds whether the process is currently in ready state (hit the ready condition) or not. + // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool + + // waitDone is closed when our call to wait finishes up, and indicates that + // our process has terminated. + waitDone chan struct{} + errMu sync.Mutex + exitErr error + exited bool +} + +// Init sets up this process, configuring binary paths if missing, initializing +// temporary directories, etc. +// +// This defaults all defaultable fields. 
+func (ps *State) Init(name string) error { + if ps.Path == "" { + if name == "" { + return fmt.Errorf("must have at least one of name or path") + } + ps.Path = BinPathFinder(name, "") + } + + if ps.Dir == "" { + newDir, err := os.MkdirTemp("", "k8s_test_framework_") + if err != nil { + return err + } + ps.Dir = newDir + ps.DirNeedsCleaning = true + } + + if ps.StartTimeout == 0 { + ps.StartTimeout = 20 * time.Second + } + + if ps.StopTimeout == 0 { + ps.StopTimeout = 20 * time.Second + } + return nil +} + +type stopChannel chan struct{} + +// CheckFlag checks the help output of this command for the presence of the given flag, specified +// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`), +// returning true if the flag is present. +func (ps *State) CheckFlag(flag string) (bool, error) { + cmd := exec.Command(ps.Path, "--help") + outContents, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err) + } + pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line) + matched, err := regexp.Match(pat, outContents) + if err != nil { + return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err) + } + return matched, nil +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (ps *State) Start(stdout, stderr io.Writer) (err error) { + if ps.ready { + return nil + } + + ps.Cmd = exec.Command(ps.Path, ps.Args...) + ps.Cmd.Stdout = stdout + ps.Cmd.Stderr = stderr + + ready := make(chan bool) + timedOut := time.After(ps.StartTimeout) + pollerStopCh := make(stopChannel) + go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ready, pollerStopCh) + + ps.waitDone = make(chan struct{}) + + if err := ps.Cmd.Start(); err != nil { + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exited = true + return err + } + go func() { + defer close(ps.waitDone) + err := ps.Cmd.Wait() + + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exitErr = err + ps.exited = true + }() + + select { + case <-ready: + ps.ready = true + return nil + case <-ps.waitDone: + close(pollerStopCh) + return fmt.Errorf("timeout waiting for process %s to start successfully "+ + "(it may have failed to start, or stopped unexpectedly before becoming ready)", + path.Base(ps.Path)) + case <-timedOut: + close(pollerStopCh) + if ps.Cmd != nil { + // intentionally ignore this -- we might've crashed, failed to start, etc + ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck + } + return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) + } +} + +// Exited returns true if the process exited, and may also +// return an error (as per Cmd.Wait) if the process did not +// exit with error code 0. 
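// A rough lifecycle sketch (not part of this patch) combining Init, CheckFlag, Start, and
// Stop; the binary name, flag, and health endpoint are hypothetical:
//
//    ps := &State{
//        Args:        []string{"--bind-address=127.0.0.1"},
//        HealthCheck: HealthCheck{URL: url.URL{Scheme: "http", Host: "127.0.0.1:8080", Path: "/healthz"}},
//    }
//    if err := ps.Init("kube-apiserver"); err != nil { /* handle */ }
//    if ok, _ := ps.CheckFlag("bind-address"); !ok { /* flag unsupported by this binary */ }
//    if err := ps.Start(os.Stdout, os.Stderr); err != nil { /* failed or timed out waiting for /healthz */ }
//    defer ps.Stop() // SIGTERM, wait up to StopTimeout, then clean Dir if needed
//    if exited, exitErr := ps.Exited(); exited { /* process died; exitErr as per Cmd.Wait */ }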
+func (ps *State) Exited() (bool, error) { + ps.errMu.Lock() + defer ps.errMu.Unlock() + return ps.exited, ps.exitErr +} + +func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) { + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + // there's probably certs *somewhere*, + // but it's fine to just skip validating + // them for health checks during testing + InsecureSkipVerify: true, //nolint:gosec + }, + }, + } + if interval <= 0 { + interval = 100 * time.Millisecond + } + for { + res, err := client.Get(url.String()) + if err == nil { + res.Body.Close() + if res.StatusCode == http.StatusOK { + ready <- true + return + } + } + + select { + case <-stopCh: + return + default: + time.Sleep(interval) + } + } +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (ps *State) Stop() error { + // Always clear the directory if we need to. + defer func() { + if ps.DirNeedsCleaning { + _ = os.RemoveAll(ps.Dir) + } + }() + if ps.Cmd == nil { + return nil + } + if done, _ := ps.Exited(); done { + return nil + } + if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err) + } + + timedOut := time.After(ps.StopTimeout) + + select { + case <-ps.waitDone: + break + case <-timedOut: + return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) + } + ps.ready = false + return nil +} diff --git a/pkg/internal/testing/process/process_suite_test.go b/pkg/internal/testing/process/process_suite_test.go new file mode 100644 index 0000000000..4b9d7ab198 --- /dev/null +++ b/pkg/internal/testing/process/process_suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestInternal(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + suiteName := "Envtest Process Launcher Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/internal/testing/process/process_test.go b/pkg/internal/testing/process/process_test.go new file mode 100644 index 0000000000..4ea6ae7263 --- /dev/null +++ b/pkg/internal/testing/process/process_test.go @@ -0,0 +1,372 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process_test + +import ( + "bytes" + "net" + "net/http" + "net/url" + "os" + "strconv" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + healthURLPath = "/healthz" +) + +var _ = Describe("Start method", func() { + var ( + processState *State + server *ghttp.Server + ) + BeforeEach(func() { + server = ghttp.NewServer() + + processState = &State{ + Path: "bash", + Args: simpleBashScript, + HealthCheck: HealthCheck{ + URL: getServerURL(server), + }, + } + processState.Path = "bash" + processState.Args = simpleBashScript + + }) + AfterEach(func() { + server.Close() + }) + + Context("when process takes too long to start", func() { + BeforeEach(func() { + server.RouteToHandler("GET", healthURLPath, func(resp http.ResponseWriter, _ *http.Request) { + time.Sleep(250 * time.Millisecond) + resp.WriteHeader(http.StatusOK) + }) + }) + It("returns a timeout error", func() { + processState.StartTimeout = 200 * time.Millisecond + + err := processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + + Eventually(func() bool { done, _ := processState.Exited(); return done }).Should(BeTrue()) + }) + }) + + Context("when the healthcheck returns ok", func() { + BeforeEach(func() { + + server.RouteToHandler("GET", healthURLPath, ghttp.RespondWith(http.StatusOK, "")) + }) + + It("can start a process", func() { + processState.StartTimeout = 10 * time.Second + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + + Consistently(processState.Exited).Should(BeFalse()) + }) + + It("hits the endpoint, and successfully starts", func() { + processState.StartTimeout = 100 * time.Millisecond + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(server.ReceivedRequests()).To(HaveLen(1)) + Consistently(processState.Exited).Should(BeFalse()) + }) + + Context("when the command cannot be started", func() { + var err error + + BeforeEach(func() { + processState = &State{} + processState.Path = "/nonexistent" + + err = processState.Start(nil, nil) + }) + + It("propagates the error", func() { + Expect(os.IsNotExist(err)).To(BeTrue()) + }) + + Context("but Stop() is called on it", func() { + It("does not panic", func() { + stoppingFailedProcess := func() { + Expect(processState.Stop()).To(Succeed()) + } + + Expect(stoppingFailedProcess).NotTo(Panic()) + }) + }) + }) + + Context("when IO is configured", func() { + It("can inspect stdout & stderr", func() { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + + processState.Args = []string{ + "-c", + ` + echo 'this is stderr' >&2 + echo 'that is stdout' + echo 'i started' >&2 + `, + } + processState.StartTimeout = 1 * time.Second + + Expect(processState.Start(stdout, stderr)).To(Succeed()) + Eventually(processState.Exited).Should(BeTrue()) + + Expect(stdout.String()).To(Equal("that is stdout\n")) + Expect(stderr.String()).To(Equal("this is stderr\ni started\n")) + }) + }) + }) + + Context("when the healthcheck always returns failure", func() { + BeforeEach(func() { + server.RouteToHandler("GET", healthURLPath, ghttp.RespondWith(http.StatusInternalServerError, "")) + }) + It("returns a timeout error and stops health API checker", func() { + processState.HealthCheck.URL = getServerURL(server) + 
processState.HealthCheck.Path = healthURLPath + processState.StartTimeout = 500 * time.Millisecond + + err := processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + + nrReceivedRequests := len(server.ReceivedRequests()) + Expect(nrReceivedRequests).To(Equal(5)) + time.Sleep(200 * time.Millisecond) + Expect(nrReceivedRequests).To(Equal(5)) + }) + }) + + Context("when the healthcheck isn't even listening", func() { + BeforeEach(func() { + server.Close() + }) + + It("returns a timeout error", func() { + processState.HealthCheck.Path = healthURLPath + processState.StartTimeout = 500 * time.Millisecond + + port, host, err := addr.Suggest("") + Expect(err).NotTo(HaveOccurred()) + + processState.HealthCheck.URL = url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + + err = processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + }) + }) + + Context("when the healthcheck fails initially but succeeds eventually", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWith(http.StatusInternalServerError, ""), + ghttp.RespondWith(http.StatusInternalServerError, ""), + ghttp.RespondWith(http.StatusInternalServerError, ""), + ghttp.RespondWith(http.StatusOK, ""), + ) + }) + + It("hits the endpoint repeatedly, and successfully starts", func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = healthURLPath + processState.StartTimeout = 20 * time.Second + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(server.ReceivedRequests()).To(HaveLen(4)) + Consistently(processState.Exited).Should(BeFalse()) + }) + + Context("when the polling interval is not configured", func() { + It("uses the default interval for polling", func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = "/helathz" + processState.StartTimeout = 300 * time.Millisecond + + Expect(processState.Start(nil, nil)).To(MatchError(ContainSubstring("timeout"))) + Expect(server.ReceivedRequests()).To(HaveLen(3)) + }) + }) + + Context("when the polling interval is configured", func() { + BeforeEach(func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = healthURLPath + processState.HealthCheck.PollInterval = time.Millisecond * 150 + }) + + It("hits the endpoint in the configured interval", func() { + processState.StartTimeout = 3 * processState.HealthCheck.PollInterval + + Expect(processState.Start(nil, nil)).To(MatchError(ContainSubstring("timeout"))) + Expect(server.ReceivedRequests()).To(HaveLen(3)) + }) + }) + }) +}) + +var _ = Describe("Stop method", func() { + var ( + server *ghttp.Server + processState *State + ) + BeforeEach(func() { + server = ghttp.NewServer() + server.RouteToHandler("GET", healthURLPath, ghttp.RespondWith(http.StatusOK, "")) + processState = &State{ + Path: "bash", + Args: simpleBashScript, + HealthCheck: HealthCheck{ + URL: getServerURL(server), + }, + } + processState.StartTimeout = 10 * time.Second + }) + + AfterEach(func() { + server.Close() + }) + Context("when Stop() is called", func() { + BeforeEach(func() { + Expect(processState.Start(nil, nil)).To(Succeed()) + processState.StopTimeout = 10 * time.Second + }) + + It("stops the process", func() { + Expect(processState.Stop()).To(Succeed()) + }) + + Context("multiple times", func() { + It("does not error or panic on consecutive calls", func() { + stoppingTheProcess := func() { + 
Expect(processState.Stop()).To(Succeed()) + } + Expect(stoppingTheProcess).NotTo(Panic()) + Expect(stoppingTheProcess).NotTo(Panic()) + Expect(stoppingTheProcess).NotTo(Panic()) + }) + }) + }) + + Context("when the command cannot be stopped", func() { + It("returns a timeout error", func() { + Expect(processState.Start(nil, nil)).To(Succeed()) + processState.StopTimeout = 1 * time.Nanosecond // much shorter than the sleep in the script + + Expect(processState.Stop()).To(MatchError(ContainSubstring("timeout"))) + }) + }) + + Context("when the directory needs to be cleaned up", func() { + It("removes the directory", func() { + var err error + + Expect(processState.Start(nil, nil)).To(Succeed()) + processState.Dir, err = os.MkdirTemp("", "k8s_test_framework_") + Expect(err).NotTo(HaveOccurred()) + processState.DirNeedsCleaning = true + processState.StopTimeout = 400 * time.Millisecond + + Expect(processState.Stop()).To(Succeed()) + Expect(processState.Dir).NotTo(BeAnExistingFile()) + }) + }) +}) + +var _ = Describe("Init", func() { + Context("when all inputs are provided", func() { + It("passes them through", func() { + ps := &State{ + Dir: "/some/dir", + Path: "/some/path/to/some/bin", + StartTimeout: 20 * time.Hour, + StopTimeout: 65537 * time.Millisecond, + } + + Expect(ps.Init("some name")).To(Succeed()) + + Expect(ps.Dir).To(Equal("/some/dir")) + Expect(ps.DirNeedsCleaning).To(BeFalse()) + Expect(ps.Path).To(Equal("/some/path/to/some/bin")) + Expect(ps.StartTimeout).To(Equal(20 * time.Hour)) + Expect(ps.StopTimeout).To(Equal(65537 * time.Millisecond)) + }) + }) + + Context("when inputs are empty", func() { + It("ps them", func() { + ps := &State{} + Expect(ps.Init("some name")).To(Succeed()) + + Expect(ps.Dir).To(BeADirectory()) + Expect(os.RemoveAll(ps.Dir)).To(Succeed()) + Expect(ps.DirNeedsCleaning).To(BeTrue()) + + Expect(ps.Path).NotTo(BeEmpty()) + + Expect(ps.StartTimeout).NotTo(BeZero()) + Expect(ps.StopTimeout).NotTo(BeZero()) + }) + }) + + Context("when neither name nor path are provided", func() { + It("returns an error", func() { + ps := &State{} + Expect(ps.Init("")).To(MatchError("must have at least one of name or path")) + }) + }) +}) + +var simpleBashScript = []string{ + "-c", + ` + i=0 + while true + do + echo "loop $i" >&2 + let 'i += 1' + sleep 0.2 + done + `, +} + +func getServerURL(server *ghttp.Server) url.URL { + url, err := url.Parse(server.URL()) + Expect(err).NotTo(HaveOccurred()) + url.Path = healthURLPath + return *url +} diff --git a/pkg/leaderelection/doc.go b/pkg/leaderelection/doc.go new file mode 100644 index 0000000000..37a9aefab5 --- /dev/null +++ b/pkg/leaderelection/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package leaderelection contains a constructor for a leader election resource lock. +This is used to ensure that multiple copies of a controller manager can be run with +only one active set of controllers, for active-passive HA. + +It uses built-in Kubernetes leader election APIs. 
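A rough wiring sketch (illustrative only, not part of this patch; cfg and recorderProvider
stand for an assumed *rest.Config and recorder.Provider, and the manager normally does this
wiring for you):

	lock, err := leaderelection.NewResourceLock(cfg, recorderProvider, leaderelection.Options{
		LeaderElection:          true,
		LeaderElectionID:        "my-controller-lock",
		LeaderElectionNamespace: "kube-system",
	})
	// The returned resourcelock.Interface can then be driven by
	// k8s.io/client-go/tools/leaderelection.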
+*/ +package leaderelection diff --git a/pkg/leaderelection/fake/doc.go b/pkg/leaderelection/fake/doc.go new file mode 100644 index 0000000000..3fff7c7189 --- /dev/null +++ b/pkg/leaderelection/fake/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package fake mocks a resource lock for testing purposes. +Always returns leadership. +*/ +package fake diff --git a/pkg/leaderelection/fake/leader_election.go b/pkg/leaderelection/fake/leader_election.go new file mode 100644 index 0000000000..5a82cf43b8 --- /dev/null +++ b/pkg/leaderelection/fake/leader_election.go @@ -0,0 +1,96 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + "encoding/json" + "os" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "sigs.k8s.io/controller-runtime/pkg/leaderelection" + "sigs.k8s.io/controller-runtime/pkg/recorder" +) + +// NewResourceLock creates a new ResourceLock for use in testing +// leader election. +func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + // Leader id, needs to be unique + id, err := os.Hostname() + if err != nil { + return nil, err + } + id = id + "_" + string(uuid.NewUUID()) + + return &ResourceLock{ + id: id, + record: resourcelock.LeaderElectionRecord{ + HolderIdentity: id, + LeaseDurationSeconds: 15, + AcquireTime: metav1.NewTime(time.Now()), + RenewTime: metav1.NewTime(time.Now().Add(15 * time.Second)), + LeaderTransitions: 1, + }, + }, nil +} + +// ResourceLock implements the ResourceLockInterface. +// By default returns that the current identity holds the lock. +type ResourceLock struct { + id string + record resourcelock.LeaderElectionRecord +} + +// Get implements the ResourceLockInterface. +func (f *ResourceLock) Get(ctx context.Context) (*resourcelock.LeaderElectionRecord, []byte, error) { + recordBytes, err := json.Marshal(f.record) + if err != nil { + return nil, nil, err + } + return &f.record, recordBytes, nil +} + +// Create implements the ResourceLockInterface. +func (f *ResourceLock) Create(ctx context.Context, ler resourcelock.LeaderElectionRecord) error { + f.record = ler + return nil +} + +// Update implements the ResourceLockInterface. 
+func (f *ResourceLock) Update(ctx context.Context, ler resourcelock.LeaderElectionRecord) error { + f.record = ler + return nil +} + +// RecordEvent implements the ResourceLockInterface. +func (f *ResourceLock) RecordEvent(s string) { + +} + +// Identity implements the ResourceLockInterface. +func (f *ResourceLock) Identity() string { + return f.id +} + +// Describe implements the ResourceLockInterface. +func (f *ResourceLock) Describe() string { + return f.id +} diff --git a/pkg/leaderelection/leader_election.go b/pkg/leaderelection/leader_election.go new file mode 100644 index 0000000000..ee4fcf4cbe --- /dev/null +++ b/pkg/leaderelection/leader_election.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "errors" + "fmt" + "os" + + "k8s.io/apimachinery/pkg/util/uuid" + coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + + "sigs.k8s.io/controller-runtime/pkg/recorder" +) + +const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + +// Options provides the required configuration to create a new resource lock. +type Options struct { + // LeaderElection determines whether or not to use leader election when + // starting the manager. + LeaderElection bool + + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "leases". + LeaderElectionResourceLock string + + // LeaderElectionNamespace determines the namespace in which the leader + // election resource will be created. + LeaderElectionNamespace string + + // LeaderElectionID determines the name of the resource that leader election + // will use for holding the leader lock. + LeaderElectionID string +} + +// NewResourceLock creates a new resource lock for use in a leader election loop. +func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) { + if !options.LeaderElection { + return nil, nil + } + + // Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was + // used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning + // five minor releases, any actively maintained operators are very likely to have a released version that uses + // "configmapsleases". Therefore defaulting to "leases" should be safe. 
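	// For illustration (not part of this patch): a caller that still wants the migration
	// behaviour can request it explicitly before calling NewResourceLock, e.g.
	//
	//    options.LeaderElectionResourceLock = resourcelock.ConfigMapsLeasesResourceLock
	//
	// using the constant names defined in k8s.io/client-go/tools/leaderelection/resourcelock.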
+ if options.LeaderElectionResourceLock == "" { + options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock + } + + // LeaderElectionID must be provided to prevent clashes + if options.LeaderElectionID == "" { + return nil, errors.New("LeaderElectionID must be configured") + } + + // Default the namespace (if running in cluster) + if options.LeaderElectionNamespace == "" { + var err error + options.LeaderElectionNamespace, err = getInClusterNamespace() + if err != nil { + return nil, fmt.Errorf("unable to find leader election namespace: %w", err) + } + } + + // Leader id, needs to be unique + id, err := os.Hostname() + if err != nil { + return nil, err + } + id = id + "_" + string(uuid.NewUUID()) + + // Construct clients for leader election + rest.AddUserAgent(config, "leader-election") + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, err + } + + coordinationClient, err := coordinationv1client.NewForConfig(config) + if err != nil { + return nil, err + } + + return resourcelock.New(options.LeaderElectionResourceLock, + options.LeaderElectionNamespace, + options.LeaderElectionID, + corev1Client, + coordinationClient, + resourcelock.ResourceLockConfig{ + Identity: id, + EventRecorder: recorderProvider.GetEventRecorderFor(id), + }) +} + +func getInClusterNamespace() (string, error) { + // Check whether the namespace file exists. + // If not, we are not running in cluster so can't guess the namespace. + if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) { + return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace") + } else if err != nil { + return "", fmt.Errorf("error checking namespace file: %w", err) + } + + // Load the namespace file and return its content + namespace, err := os.ReadFile(inClusterNamespacePath) + if err != nil { + return "", fmt.Errorf("error reading namespace file: %w", err) + } + return string(namespace), nil +} diff --git a/pkg/log/deleg.go b/pkg/log/deleg.go new file mode 100644 index 0000000000..c82447d919 --- /dev/null +++ b/pkg/log/deleg.go @@ -0,0 +1,199 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// loggerPromise knows how to populate a concrete logr.Logger +// with options, given an actual base logger later on down the line. +type loggerPromise struct { + logger *DelegatingLogSink + childPromises []*loggerPromise + promisesLock sync.Mutex + + name *string + tags []interface{} +} + +func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { + res := &loggerPromise{ + logger: l, + name: &name, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// WithValues provides a new Logger with the tags appended. 
+func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { + res := &loggerPromise{ + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// Fulfill instantiates the Logger with the provided logger. +func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { + sink := parentLogSink + if p.name != nil { + sink = sink.WithName(*p.name) + } + + if p.tags != nil { + sink = sink.WithValues(p.tags...) + } + + p.logger.lock.Lock() + p.logger.logger = sink + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + p.logger.logger = withCallDepth.WithCallDepth(1) + } + p.logger.promise = nil + p.logger.lock.Unlock() + + for _, childPromise := range p.childPromises { + childPromise.Fulfill(sink) + } +} + +// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// If the underlying promise is not nil, it registers calls to sub-loggers with +// the logging factory to be populated later, and returns a new delegating +// logger. It expects to have *some* logr.Logger set at all times (generally +// a no-op logger before the promises are fulfilled). +type DelegatingLogSink struct { + lock sync.RWMutex + logger logr.LogSink + promise *loggerPromise + info logr.RuntimeInfo +} + +// Init implements logr.LogSink. +func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { + l.lock.Lock() + defer l.lock.Unlock() + l.info = info +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info +// logs. +func (l *DelegatingLogSink) Enabled(level int) bool { + l.lock.RLock() + defer l.lock.RUnlock() + return l.logger.Enabled(level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to +// the log line. The key/value pairs can then be used to add additional +// variable information. The key/value pairs should alternate string +// keys and arbitrary values. +func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Info(level, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to calling Info with the "error" named value, but may +// have unique behavior, and should be preferred for logging errors (see the +// package documentations for more information). +// +// The msg field should be used to add context to any underlying error, +// while the err field should be used to attach the actual error that +// triggered this log line, if present. +func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Error(err, msg, keysAndValues...) +} + +// WithName provides a new Logger with the name appended. +func (l *DelegatingLogSink) WithName(name string) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + sink := l.logger.WithName(name) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithName(res, name) + res.promise = promise + + return res +} + +// WithValues provides a new Logger with the tags appended. 
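// A minimal sketch (not part of this patch) of the promise flow in this file; realSink
// stands for any concrete logr.LogSink supplied later:
//
//    sink := NewDelegatingLogSink(NullLogSink{}) // constructor defined later in this file
//    child := logr.New(sink).WithName("setup")   // registers a child promise
//    child.Info("dropped")                       // still goes to the NullLogSink
//    sink.Fulfill(realSink)                      // rewires the root and every child promise
//    child.Info("delivered")                     // now reaches realSink, under the name "setup"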
+func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + sink := l.logger.WithValues(tags...) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithValues(res, tags...) + res.promise = promise + + return res +} + +// Fulfill switches the logger over to use the actual logger +// provided, instead of the temporary initial one, if this method +// has not been previously called. +func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { + if l.promise != nil { + l.promise.Fulfill(actual) + } +} + +// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before its promise is fulfilled. +func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { + l := &DelegatingLogSink{ + logger: initial, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, + } + l.promise.logger = l + return l +} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 0000000000..082dce3adb --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log contains utilities for fetching a new logger +// when one is not already available. +// +// # The Log Handle +// +// This package contains a root logr.Logger Log. It may be used to +// get a handle to whatever the root logging implementation is. By +// default, no implementation exists, and the handle returns "promises" +// to loggers. When the implementation is set using SetLogger, these +// "promises" will be converted over to real loggers. +// +// # Logr +// +// All logging in controller-runtime is structured, using a set of interfaces +// defined by a package called logr +// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides +// helpers for setting up logr backed by Zap (go.uber.org/zap). +package log + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + + loggerWasSet = true + dlog.Fulfill(l.GetSink()) +} + +// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries +// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory +// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. +// +// We need to keep the DelegatingLogSink because we have various inits() that get a logger from +// here. They will always get executed before any code that imports controller-runtime +// has a chance to run and hence to set an actual logger. 
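// A short usage sketch (not part of this patch), using the zap helpers added under
// pkg/log/zap in this PR:
//
//    import (
//        "sigs.k8s.io/controller-runtime/pkg/log"
//        "sigs.k8s.io/controller-runtime/pkg/log/zap"
//    )
//
//    func main() {
//        log.SetLogger(zap.New(zap.UseDevMode(true))) // fulfill the delegating logger early
//        log.Log.Info("starting up")                  // without SetLogger, logs become no-ops after 30s
//    }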
+func init() { + // Init is blocking, so start a new goroutine + go func() { + time.Sleep(30 * time.Second) + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + if !loggerWasSet { + dlog.Fulfill(NullLogSink{}) + } + }() +} + +var ( + loggerWasSetLock sync.Mutex + loggerWasSet bool +) + +// Log is the base logger used by kubebuilder. It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. If SetLogger is not called within +// the first 30 seconds of a binaries lifetime, it will get +// set to a NullLogSink. +var ( + dlog = NewDelegatingLogSink(NullLogSink{}) + Log = logr.New(dlog) +) + +// FromContext returns a logger with predefined values from a context.Context. +func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { + log := Log + if ctx != nil { + if logger, err := logr.FromContext(ctx); err == nil { + log = logger + } + } + return log.WithValues(keysAndValues...) +} + +// IntoContext takes a context and sets the logger as one of its values. +// Use FromContext function to retrieve the logger. +func IntoContext(ctx context.Context, log logr.Logger) context.Context { + return logr.NewContext(ctx, log) +} diff --git a/pkg/log/log_suite_test.go b/pkg/log/log_suite_test.go new file mode 100644 index 0000000000..bf8e967cb7 --- /dev/null +++ b/pkg/log/log_suite_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Log Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go new file mode 100644 index 0000000000..7ea56d9b49 --- /dev/null +++ b/pkg/log/log_test.go @@ -0,0 +1,330 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "context" + "errors" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ logr.LogSink = &DelegatingLogSink{} + +// logInfo is the information for a particular fakeLogger message. +type logInfo struct { + name []string + tags []interface{} + msg string +} + +// fakeLoggerRoot is the root object to which all fakeLoggers record their messages. 
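// An illustrative sketch (not part of this patch) of the context helpers above; the handle
// function and the logger variable are hypothetical:
//
//    ctx := IntoContext(context.Background(), logger.WithName("worker"))
//
//    func handle(ctx context.Context, key string) {
//        log := FromContext(ctx, "key", key) // falls back to Log when the context carries no logger
//        log.Info("handling")
//    }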
+type fakeLoggerRoot struct { + messages []logInfo +} + +// fakeLogger is a fake implementation of logr.Logger that records +// messages, tags, and names, +// just records the name. +type fakeLogger struct { + name []string + tags []interface{} + + root *fakeLoggerRoot +} + +func (f *fakeLogger) Init(info logr.RuntimeInfo) { +} + +func (f *fakeLogger) WithName(name string) logr.LogSink { + names := append([]string(nil), f.name...) + names = append(names, name) + return &fakeLogger{ + name: names, + tags: f.tags, + root: f.root, + } +} + +func (f *fakeLogger) WithValues(vals ...interface{}) logr.LogSink { + tags := append([]interface{}(nil), f.tags...) + tags = append(tags, vals...) + return &fakeLogger{ + name: f.name, + tags: tags, + root: f.root, + } +} + +func (f *fakeLogger) Error(err error, msg string, vals ...interface{}) { + tags := append([]interface{}(nil), f.tags...) + tags = append(tags, "error", err) + tags = append(tags, vals...) + f.root.messages = append(f.root.messages, logInfo{ + name: append([]string(nil), f.name...), + tags: tags, + msg: msg, + }) +} + +func (f *fakeLogger) Info(level int, msg string, vals ...interface{}) { + tags := append([]interface{}(nil), f.tags...) + tags = append(tags, vals...) + f.root.messages = append(f.root.messages, logInfo{ + name: append([]string(nil), f.name...), + tags: tags, + msg: msg, + }) +} + +func (f *fakeLogger) Enabled(level int) bool { return true } + +var _ = Describe("logging", func() { + + Describe("top-level logger", func() { + It("hold newly created loggers until a logger is set", func() { + By("grabbing a new sub-logger and logging to it") + l1 := Log.WithName("runtimeLog").WithValues("newtag", "newvalue1") + l1.Info("before msg") + + By("actually setting the logger") + logger := &fakeLogger{root: &fakeLoggerRoot{}} + SetLogger(logr.New(logger)) + + By("grabbing another sub-logger and logging to both loggers") + l2 := Log.WithName("runtimeLog").WithValues("newtag", "newvalue2") + l1.Info("after msg 1") + l2.Info("after msg 2") + + By("ensuring that messages after the logger was set were logged") + Expect(logger.root.messages).To(ConsistOf( + logInfo{name: []string{"runtimeLog"}, tags: []interface{}{"newtag", "newvalue1"}, msg: "after msg 1"}, + logInfo{name: []string{"runtimeLog"}, tags: []interface{}{"newtag", "newvalue2"}, msg: "after msg 2"}, + )) + }) + }) + + Describe("lazy logger initialization", func() { + var ( + root *fakeLoggerRoot + baseLog logr.LogSink + delegLog *DelegatingLogSink + ) + + BeforeEach(func() { + root = &fakeLoggerRoot{} + baseLog = &fakeLogger{root: root} + delegLog = NewDelegatingLogSink(NullLogSink{}) + }) + + It("should delegate with name", func() { + By("asking for a logger with a name before fulfill, and logging") + befFulfill1 := logr.New(delegLog).WithName("before-fulfill") + befFulfill2 := befFulfill1.WithName("two") + befFulfill1.Info("before fulfill") + + By("logging on the base logger before fulfill") + logr.New(delegLog).Info("before fulfill base") + + By("ensuring that no messages were actually recorded") + Expect(root.messages).To(BeEmpty()) + + By("fulfilling the promise") + delegLog.Fulfill(baseLog) + + By("logging with the existing loggers after fulfilling") + befFulfill1.Info("after 1") + befFulfill2.Info("after 2") + + By("grabbing a new sub-logger of a previously constructed logger and logging to it") + befFulfill1.WithName("after-from-before").Info("after 3") + + By("logging with new loggers") + logr.New(delegLog).WithName("after-fulfill").Info("after 4") + + By("ensuring 
that the messages are appropriately named") + Expect(root.messages).To(ConsistOf( + logInfo{name: []string{"before-fulfill"}, msg: "after 1"}, + logInfo{name: []string{"before-fulfill", "two"}, msg: "after 2"}, + logInfo{name: []string{"before-fulfill", "after-from-before"}, msg: "after 3"}, + logInfo{name: []string{"after-fulfill"}, msg: "after 4"}, + )) + }) + + // This test in itself will always succeed, a failure will be indicated by the + // race detector going off + It("should be threadsafe", func() { + fulfillDone := make(chan struct{}) + withNameDone := make(chan struct{}) + withValuesDone := make(chan struct{}) + grandChildDone := make(chan struct{}) + logEnabledDone := make(chan struct{}) + logInfoDone := make(chan struct{}) + logErrorDone := make(chan struct{}) + logVDone := make(chan struct{}) + + // Constructing the child in the goroutine does not reliably + // trigger the race detector + child := logr.New(delegLog).WithName("child") + go func() { + defer GinkgoRecover() + delegLog.Fulfill(NullLogSink{}) + close(fulfillDone) + }() + go func() { + defer GinkgoRecover() + delegLog.WithName("with-name") + close(withNameDone) + }() + go func() { + defer GinkgoRecover() + delegLog.WithValues("with-value") + close(withValuesDone) + }() + go func() { + defer GinkgoRecover() + child.WithValues("grandchild") + close(grandChildDone) + }() + go func() { + defer GinkgoRecover() + logr.New(delegLog).Enabled() + close(logEnabledDone) + }() + go func() { + defer GinkgoRecover() + logr.New(delegLog).Info("hello world") + close(logInfoDone) + }() + go func() { + defer GinkgoRecover() + delegLog.Error(errors.New("err"), "hello world") + close(logErrorDone) + }() + go func() { + defer GinkgoRecover() + logr.New(delegLog).V(1) + close(logVDone) + }() + + <-fulfillDone + <-withNameDone + <-withValuesDone + <-grandChildDone + <-logEnabledDone + <-logInfoDone + <-logErrorDone + <-logVDone + }) + + It("should delegate with tags", func() { + By("asking for a logger with a name before fulfill, and logging") + befFulfill1 := logr.New(delegLog).WithValues("tag1", "val1") + befFulfill2 := befFulfill1.WithValues("tag2", "val2") + befFulfill1.Info("before fulfill") + + By("logging on the base logger before fulfill") + logr.New(delegLog).Info("before fulfill base") + + By("ensuring that no messages were actually recorded") + Expect(root.messages).To(BeEmpty()) + + By("fulfilling the promise") + delegLog.Fulfill(baseLog) + + By("logging with the existing loggers after fulfilling") + befFulfill1.Info("after 1") + befFulfill2.Info("after 2") + + By("grabbing a new sub-logger of a previously constructed logger and logging to it") + befFulfill1.WithValues("tag3", "val3").Info("after 3") + + By("logging with new loggers") + logr.New(delegLog).WithValues("tag3", "val3").Info("after 4") + + By("ensuring that the messages are appropriately named") + Expect(root.messages).To(ConsistOf( + logInfo{tags: []interface{}{"tag1", "val1"}, msg: "after 1"}, + logInfo{tags: []interface{}{"tag1", "val1", "tag2", "val2"}, msg: "after 2"}, + logInfo{tags: []interface{}{"tag1", "val1", "tag3", "val3"}, msg: "after 3"}, + logInfo{tags: []interface{}{"tag3", "val3"}, msg: "after 4"}, + )) + }) + + It("shouldn't fulfill twice", func() { + By("fulfilling once") + delegLog.Fulfill(baseLog) + + By("logging a bit") + logr.New(delegLog).Info("msg 1") + + By("fulfilling with a new logger") + delegLog.Fulfill(&fakeLogger{}) + + By("logging some more") + logr.New(delegLog).Info("msg 2") + + By("checking that all log messages are present") + 
Expect(root.messages).To(ConsistOf( + logInfo{msg: "msg 1"}, + logInfo{msg: "msg 2"}, + )) + }) + }) + + Describe("logger from context", func() { + It("should return default logger when context is empty", func() { + gotLog := FromContext(context.Background()) + Expect(gotLog).To(Not(BeNil())) + }) + + It("should return existing logger", func() { + root := &fakeLoggerRoot{} + baseLog := &fakeLogger{root: root} + + wantLog := logr.New(baseLog).WithName("my-logger") + ctx := IntoContext(context.Background(), wantLog) + + gotLog := FromContext(ctx) + Expect(gotLog).To(Not(BeNil())) + + gotLog.Info("test message") + Expect(root.messages).To(ConsistOf( + logInfo{name: []string{"my-logger"}, msg: "test message"}, + )) + }) + + It("should have added key-values", func() { + root := &fakeLoggerRoot{} + baseLog := &fakeLogger{root: root} + + wantLog := logr.New(baseLog).WithName("my-logger") + ctx := IntoContext(context.Background(), wantLog) + + gotLog := FromContext(ctx, "tag1", "value1") + Expect(gotLog).To(Not(BeNil())) + + gotLog.Info("test message") + Expect(root.messages).To(ConsistOf( + logInfo{name: []string{"my-logger"}, tags: []interface{}{"tag1", "value1"}, msg: "test message"}, + )) + }) + }) + +}) diff --git a/pkg/log/null.go b/pkg/log/null.go new file mode 100644 index 0000000000..f3e81074fe --- /dev/null +++ b/pkg/log/null.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" +) + +// NB: this is the same as the null logger logr/testing, +// but avoids accidentally adding the testing flags to +// all binaries. + +// NullLogSink is a logr.Logger that does nothing. +type NullLogSink struct{} + +var _ logr.LogSink = NullLogSink{} + +// Init implements logr.LogSink. +func (log NullLogSink) Init(logr.RuntimeInfo) { +} + +// Info implements logr.InfoLogger. +func (NullLogSink) Info(_ int, _ string, _ ...interface{}) { + // Do nothing. +} + +// Enabled implements logr.InfoLogger. +func (NullLogSink) Enabled(level int) bool { + return false +} + +// Error implements logr.Logger. +func (NullLogSink) Error(_ error, _ string, _ ...interface{}) { + // Do nothing. +} + +// WithName implements logr.Logger. +func (log NullLogSink) WithName(_ string) logr.LogSink { + return log +} + +// WithValues implements logr.Logger. +func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink { + return log +} diff --git a/pkg/log/warning_handler.go b/pkg/log/warning_handler.go new file mode 100644 index 0000000000..e9522632d3 --- /dev/null +++ b/pkg/log/warning_handler.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// KubeAPIWarningLoggerOptions controls the behavior +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). +type KubeAPIWarningLoggerOptions struct { + // Deduplicate indicates a given warning message should only be written once. + // Setting this to true in a long-running process handling many warnings can + // result in increased memory use. + Deduplicate bool +} + +// KubeAPIWarningLogger is a wrapper around +// a provided logr.Logger that implements the +// rest.WarningHandler interface. +type KubeAPIWarningLogger struct { + // logger is used to log responses with the warning header + logger logr.Logger + // opts contain options controlling warning output + opts KubeAPIWarningLoggerOptions + // writtenLock gurads written + writtenLock sync.Mutex + // used to keep track of already logged messages + // and help in de-duplication. + written map[string]struct{} +} + +// HandleWarningHeader handles logging for responses from API server that are +// warnings with code being 299 and uses a logr.Logger for its logging purposes. +func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) { + if code != 299 || len(message) == 0 { + return + } + + if l.opts.Deduplicate { + l.writtenLock.Lock() + defer l.writtenLock.Unlock() + + if _, alreadyLogged := l.written[message]; alreadyLogged { + return + } + l.written[message] = struct{}{} + } + l.logger.Info(message) +} + +// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings +// with code = 299 to the provided logr.Logger. +func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger { + h := &KubeAPIWarningLogger{logger: l, opts: opts} + if opts.Deduplicate { + h.written = map[string]struct{}{} + } + return h +} diff --git a/pkg/log/zap/flags.go b/pkg/log/zap/flags.go new file mode 100644 index 0000000000..fb492b14da --- /dev/null +++ b/pkg/log/zap/flags.go @@ -0,0 +1,168 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package zap contains helpers for setting up a new logr.Logger instance +// using the Zap logging framework. 
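// A hedged wiring sketch (not part of this patch): the flag types below are meant to be
// registered by this package's Options.BindFlags and consumed via UseFlagOptions; those
// helpers and the "--zap-*" flag names are assumed from the rest of this PR rather than
// shown in this file.
//
//    opts := zap.Options{}
//    opts.BindFlags(flag.CommandLine) // e.g. --zap-log-level, --zap-encoder, --zap-time-encoding
//    flag.Parse()
//    log.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
//
//    // --zap-log-level accepts "debug", "info", "error", or a positive integer N,
//    // which enables the numerically higher verbosities (zap level -N).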
+package zap + +import ( + "flag" + "fmt" + "strconv" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var levelStrings = map[string]zapcore.Level{ + "debug": zap.DebugLevel, + "info": zap.InfoLevel, + "error": zap.ErrorLevel, +} + +var stackLevelStrings = map[string]zapcore.Level{ + "info": zap.InfoLevel, + "error": zap.ErrorLevel, + "panic": zap.PanicLevel, +} + +type encoderFlag struct { + setFunc func(NewEncoderFunc) + value string +} + +var _ flag.Value = &encoderFlag{} + +func (ev *encoderFlag) String() string { + return ev.value +} + +func (ev *encoderFlag) Type() string { + return "encoder" +} + +func (ev *encoderFlag) Set(flagValue string) error { + val := strings.ToLower(flagValue) + switch val { + case "json": + ev.setFunc(newJSONEncoder) + case "console": + ev.setFunc(newConsoleEncoder) + default: + return fmt.Errorf("invalid encoder value \"%s\"", flagValue) + } + ev.value = flagValue + return nil +} + +type levelFlag struct { + setFunc func(zapcore.LevelEnabler) + value string +} + +var _ flag.Value = &levelFlag{} + +func (ev *levelFlag) Set(flagValue string) error { + level, validLevel := levelStrings[strings.ToLower(flagValue)] + if !validLevel { + logLevel, err := strconv.Atoi(flagValue) + if err != nil { + return fmt.Errorf("invalid log level \"%s\"", flagValue) + } + if logLevel > 0 { + intLevel := -1 * logLevel + ev.setFunc(zap.NewAtomicLevelAt(zapcore.Level(int8(intLevel)))) + } else { + return fmt.Errorf("invalid log level \"%s\"", flagValue) + } + } else { + ev.setFunc(zap.NewAtomicLevelAt(level)) + } + ev.value = flagValue + return nil +} + +func (ev *levelFlag) String() string { + return ev.value +} + +func (ev *levelFlag) Type() string { + return "level" +} + +type stackTraceFlag struct { + setFunc func(zapcore.LevelEnabler) + value string +} + +var _ flag.Value = &stackTraceFlag{} + +func (ev *stackTraceFlag) Set(flagValue string) error { + level, validLevel := stackLevelStrings[strings.ToLower(flagValue)] + if !validLevel { + return fmt.Errorf("invalid stacktrace level \"%s\"", flagValue) + } + ev.setFunc(zap.NewAtomicLevelAt(level)) + ev.value = flagValue + return nil +} + +func (ev *stackTraceFlag) String() string { + return ev.value +} + +func (ev *stackTraceFlag) Type() string { + return "level" +} + +type timeEncodingFlag struct { + setFunc func(zapcore.TimeEncoder) + value string +} + +var _ flag.Value = &timeEncodingFlag{} + +func (ev *timeEncodingFlag) String() string { + return ev.value +} + +func (ev *timeEncodingFlag) Type() string { + return "time-encoding" +} + +func (ev *timeEncodingFlag) Set(flagValue string) error { + val := strings.ToLower(flagValue) + switch val { + case "rfc3339nano": + ev.setFunc(zapcore.RFC3339NanoTimeEncoder) + case "rfc3339": + ev.setFunc(zapcore.RFC3339TimeEncoder) + case "iso8601": + ev.setFunc(zapcore.ISO8601TimeEncoder) + case "millis": + ev.setFunc(zapcore.EpochMillisTimeEncoder) + case "nanos": + ev.setFunc(zapcore.EpochNanosTimeEncoder) + case "epoch": + ev.setFunc(zapcore.EpochTimeEncoder) + default: + return fmt.Errorf("invalid time-encoding value \"%s\"", flagValue) + } + + ev.value = flagValue + return nil +} diff --git a/pkg/log/zap/kube_helpers.go b/pkg/log/zap/kube_helpers.go new file mode 100644 index 0000000000..9824470240 --- /dev/null +++ b/pkg/log/zap/kube_helpers.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package zap + +import ( + "fmt" + "reflect" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/zapcore" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// KubeAwareEncoder is a Kubernetes-aware Zap Encoder. +// Instead of trying to force Kubernetes objects to implement +// ObjectMarshaler, we just implement a wrapper around a normal +// ObjectMarshaler that checks for Kubernetes objects. +type KubeAwareEncoder struct { + // Encoder is the zapcore.Encoder that this encoder delegates to + zapcore.Encoder + + // Verbose controls whether or not the full object is printed. + // If false, only name, namespace, api version, and kind are printed. + // Otherwise, the full object is logged. + Verbose bool +} + +// namespacedNameWrapper is a zapcore.ObjectMarshaler for Kubernetes NamespacedName. +type namespacedNameWrapper struct { + types.NamespacedName +} + +func (w namespacedNameWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { + if w.Namespace != "" { + enc.AddString("namespace", w.Namespace) + } + + enc.AddString("name", w.Name) + + return nil +} + +// kubeObjectWrapper is a zapcore.ObjectMarshaler for Kubernetes objects. +type kubeObjectWrapper struct { + obj runtime.Object +} + +// MarshalLogObject implements zapcore.ObjectMarshaler. +func (w kubeObjectWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // TODO(directxman12): log kind and apiversion if not set explicitly (common case) + // -- needs a scheme to convert to the GVK. + + if reflect.ValueOf(w.obj).IsNil() { + return fmt.Errorf("got nil for runtime.Object") + } + + if gvk := w.obj.GetObjectKind().GroupVersionKind(); gvk.Version != "" { + enc.AddString("apiVersion", gvk.GroupVersion().String()) + enc.AddString("kind", gvk.Kind) + } + + objMeta, err := meta.Accessor(w.obj) + if err != nil { + return fmt.Errorf("got runtime.Object without object metadata: %v", w.obj) + } + + if ns := objMeta.GetNamespace(); ns != "" { + enc.AddString("namespace", ns) + } + enc.AddString("name", objMeta.GetName()) + + return nil +} + +// NB(directxman12): can't just override AddReflected, since the encoder calls AddReflected on itself directly + +// Clone implements zapcore.Encoder. +func (k *KubeAwareEncoder) Clone() zapcore.Encoder { + return &KubeAwareEncoder{ + Encoder: k.Encoder.Clone(), + } +} + +// EncodeEntry implements zapcore.Encoder. +func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { + if k.Verbose { + // Kubernetes objects implement fmt.Stringer, so if we + // want verbose output, just delegate to that. + return k.Encoder.EncodeEntry(entry, fields) + } + + for i, field := range fields { + // intercept stringer fields that happen to be Kubernetes runtime.Object or + // types.NamespacedName values (Kubernetes runtime.Objects commonly + // implement String, apparently). + // *unstructured.Unstructured does NOT implement the fmt.Stringer interface. + // We have to handle it specially.
+ if field.Type == zapcore.StringerType || field.Type == zapcore.ReflectType { + switch val := field.Interface.(type) { + case runtime.Object: + fields[i] = zapcore.Field{ + Type: zapcore.ObjectMarshalerType, + Key: field.Key, + Interface: kubeObjectWrapper{obj: val}, + } + case types.NamespacedName: + fields[i] = zapcore.Field{ + Type: zapcore.ObjectMarshalerType, + Key: field.Key, + Interface: namespacedNameWrapper{NamespacedName: val}, + } + } + } + } + + return k.Encoder.EncodeEntry(entry, fields) +} diff --git a/pkg/log/zap/zap.go b/pkg/log/zap/zap.go new file mode 100644 index 0000000000..6dce5a04f7 --- /dev/null +++ b/pkg/log/zap/zap.go @@ -0,0 +1,311 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package zap contains helpers for setting up a new logr.Logger instance +// using the Zap logging framework. +package zap + +import ( + "flag" + "io" + "os" + "time" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// EncoderConfigOption is a function that can modify a `zapcore.EncoderConfig`. +type EncoderConfigOption func(*zapcore.EncoderConfig) + +// NewEncoderFunc is a function that creates an Encoder using the provided EncoderConfigOptions. +type NewEncoderFunc func(...EncoderConfigOption) zapcore.Encoder + +// New returns a brand new Logger configured with Opts. It +// uses KubeAwareEncoder which adds Type information and +// Namespace/Name to the log. +func New(opts ...Opts) logr.Logger { + return zapr.NewLogger(NewRaw(opts...)) +} + +// Opts allows to manipulate Options. +type Opts func(*Options) + +// UseDevMode sets the logger to use (or not use) development mode (more +// human-readable output, extra stack traces and logging information, etc). +// See Options.Development. +func UseDevMode(enabled bool) Opts { + return func(o *Options) { + o.Development = enabled + } +} + +// WriteTo configures the logger to write to the given io.Writer, instead of standard error. +// See Options.DestWriter. +func WriteTo(out io.Writer) Opts { + return func(o *Options) { + o.DestWriter = out + } +} + +// Encoder configures how the logger will encode the output e.g JSON or console. +// See Options.Encoder. +func Encoder(encoder zapcore.Encoder) func(o *Options) { + return func(o *Options) { + o.Encoder = encoder + } +} + +// JSONEncoder configures the logger to use a JSON Encoder. +func JSONEncoder(opts ...EncoderConfigOption) func(o *Options) { + return func(o *Options) { + o.Encoder = newJSONEncoder(opts...) + } +} + +func newJSONEncoder(opts ...EncoderConfigOption) zapcore.Encoder { + encoderConfig := zap.NewProductionEncoderConfig() + for _, opt := range opts { + opt(&encoderConfig) + } + return zapcore.NewJSONEncoder(encoderConfig) +} + +// ConsoleEncoder configures the logger to use a Console encoder. +func ConsoleEncoder(opts ...EncoderConfigOption) func(o *Options) { + return func(o *Options) { + o.Encoder = newConsoleEncoder(opts...) 
+ } +} + +func newConsoleEncoder(opts ...EncoderConfigOption) zapcore.Encoder { + encoderConfig := zap.NewDevelopmentEncoderConfig() + for _, opt := range opts { + opt(&encoderConfig) + } + return zapcore.NewConsoleEncoder(encoderConfig) +} + +// Level sets Options.Level, which configures the minimum enabled logging level, e.g. Debug or Info. +// A zap log level should be multiplied by -1 to get the logr verbosity. +// For example, to get logr verbosity of 3, pass zapcore.Level(-3) to this Opts. +// See https://pkg.go.dev/github.com/go-logr/zapr for how zap level relates to logr verbosity. +func Level(level zapcore.LevelEnabler) func(o *Options) { + return func(o *Options) { + o.Level = level + } +} + +// StacktraceLevel sets Options.StacktraceLevel, which configures the logger to record a stack trace +// for all messages at or above a given level. +// See the Level Opts for the relationship of zap log level to logr verbosity. +func StacktraceLevel(stacktraceLevel zapcore.LevelEnabler) func(o *Options) { + return func(o *Options) { + o.StacktraceLevel = stacktraceLevel + } +} + +// RawZapOpts allows appending arbitrary zap.Options to configure the underlying zap logger. +// See Options.ZapOpts. +func RawZapOpts(zapOpts ...zap.Option) func(o *Options) { + return func(o *Options) { + o.ZapOpts = append(o.ZapOpts, zapOpts...) + } +} + +// Options contains all possible settings. +type Options struct { + // Development configures the logger to use a Zap development config + // (stacktraces on warnings, no sampling), otherwise a Zap production + // config will be used (stacktraces on errors, sampling). + Development bool + // Encoder configures how Zap will encode the output. Defaults to + // console when Development is true and JSON otherwise. + Encoder zapcore.Encoder + // EncoderConfigOptions can modify the EncoderConfig needed to initialize an Encoder. + // See https://pkg.go.dev/go.uber.org/zap/zapcore#EncoderConfig for the list of options + // that can be configured. + // Note that the EncoderConfigOptions are not applied when the Encoder option is already set. + EncoderConfigOptions []EncoderConfigOption + // NewEncoder configures Encoder using the provided EncoderConfigOptions. + // Note that the NewEncoder function is not used when the Encoder option is already set. + NewEncoder NewEncoderFunc + // DestWriter controls the destination of the log output. Defaults to + // os.Stderr. + DestWriter io.Writer + // DestWritter controls the destination of the log output. Defaults to + // os.Stderr. + // + // Deprecated: Use DestWriter instead. + DestWritter io.Writer + // Level configures the verbosity of the logging. + // Defaults to Debug when Development is true and Info otherwise. + // A zap log level should be multiplied by -1 to get the logr verbosity. + // For example, to get logr verbosity of 3, set this field to zapcore.Level(-3). + // See https://pkg.go.dev/github.com/go-logr/zapr for how zap level relates to logr verbosity. + Level zapcore.LevelEnabler + // StacktraceLevel is the level at and above which stacktraces will + // be recorded for all messages. Defaults to Warn when Development + // is true and Error otherwise. + // See Level for the relationship of zap log level to logr verbosity. + StacktraceLevel zapcore.LevelEnabler + // ZapOpts allows passing arbitrary zap.Options to configure on the + // underlying Zap logger. + ZapOpts []zap.Option + // TimeEncoder specifies the encoder for the timestamps in log messages.
+ // Defaults to EpochTimeEncoder as this is the default in Zap currently. + TimeEncoder zapcore.TimeEncoder +} + +// addDefaults adds defaults to the Options. +func (o *Options) addDefaults() { + if o.DestWriter == nil && o.DestWritter == nil { + o.DestWriter = os.Stderr + } else if o.DestWriter == nil && o.DestWritter != nil { + // the misspelled DestWritter is deprecated but not yet removed + o.DestWriter = o.DestWritter + } + + if o.Development { + if o.NewEncoder == nil { + o.NewEncoder = newConsoleEncoder + } + if o.Level == nil { + lvl := zap.NewAtomicLevelAt(zap.DebugLevel) + o.Level = &lvl + } + if o.StacktraceLevel == nil { + lvl := zap.NewAtomicLevelAt(zap.WarnLevel) + o.StacktraceLevel = &lvl + } + o.ZapOpts = append(o.ZapOpts, zap.Development()) + } else { + if o.NewEncoder == nil { + o.NewEncoder = newJSONEncoder + } + if o.Level == nil { + lvl := zap.NewAtomicLevelAt(zap.InfoLevel) + o.Level = &lvl + } + if o.StacktraceLevel == nil { + lvl := zap.NewAtomicLevelAt(zap.ErrorLevel) + o.StacktraceLevel = &lvl + } + // Disable sampling for increased Debug levels. Otherwise, this will + // cause index out of bounds errors in the sampling code. + if !o.Level.Enabled(zapcore.Level(-2)) { + o.ZapOpts = append(o.ZapOpts, + zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSamplerWithOptions(core, time.Second, 100, 100) + })) + } + } + + if o.TimeEncoder == nil { + o.TimeEncoder = zapcore.EpochTimeEncoder + } + f := func(ecfg *zapcore.EncoderConfig) { + ecfg.EncodeTime = o.TimeEncoder + } + // prepend instead of append it in case someone adds a time encoder option in it + o.EncoderConfigOptions = append([]EncoderConfigOption{f}, o.EncoderConfigOptions...) + + if o.Encoder == nil { + o.Encoder = o.NewEncoder(o.EncoderConfigOptions...) + } + o.ZapOpts = append(o.ZapOpts, zap.AddStacktrace(o.StacktraceLevel)) +} + +// NewRaw returns a new zap.Logger configured with the passed Opts +// or their defaults. It uses KubeAwareEncoder which adds Type +// information and Namespace/Name to the log. +func NewRaw(opts ...Opts) *zap.Logger { + o := &Options{} + for _, opt := range opts { + opt(o) + } + o.addDefaults() + + // this basically mimics NewConfig, but with a custom sink + sink := zapcore.AddSync(o.DestWriter) + + o.ZapOpts = append(o.ZapOpts, zap.ErrorOutput(sink)) + log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: o.Encoder, Verbose: o.Development}, sink, o.Level)) + log = log.WithOptions(o.ZapOpts...) + return log +} + +// BindFlags will parse the given flagset for zap option flags and set the log options accordingly: +// - zap-devel: +// Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn) +// Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) +// - zap-encoder: Zap log encoding (one of 'json' or 'console') +// - zap-log-level: Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', +// or any integer value > 0 which corresponds to custom debug levels of increasing verbosity. +// - zap-stacktrace-level: Zap Level at and above which stacktraces are captured (one of 'info', 'error' or 'panic') +// - zap-time-encoding: Zap time encoding (one of 'epoch', 'millis', 'nanos', 'iso8601', 'rfc3339' or 'rfc3339nano'), +// Defaults to 'epoch'. +func (o *Options) BindFlags(fs *flag.FlagSet) { + // Set Development mode value + fs.BoolVar(&o.Development, "zap-devel", o.Development, + "Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). 
"+ + "Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)") + + // Set Encoder value + var encVal encoderFlag + encVal.setFunc = func(fromFlag NewEncoderFunc) { + o.NewEncoder = fromFlag + } + fs.Var(&encVal, "zap-encoder", "Zap log encoding (one of 'json' or 'console')") + + // Set the Log Level + var levelVal levelFlag + levelVal.setFunc = func(fromFlag zapcore.LevelEnabler) { + o.Level = fromFlag + } + fs.Var(&levelVal, "zap-log-level", + "Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', "+ + "or any integer value > 0 which corresponds to custom debug levels of increasing verbosity") + + // Set the StrackTrace Level + var stackVal stackTraceFlag + stackVal.setFunc = func(fromFlag zapcore.LevelEnabler) { + o.StacktraceLevel = fromFlag + } + fs.Var(&stackVal, "zap-stacktrace-level", + "Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').") + + // Set the time encoding + var timeEncoderVal timeEncodingFlag + timeEncoderVal.setFunc = func(fromFlag zapcore.TimeEncoder) { + o.TimeEncoder = fromFlag + } + fs.Var(&timeEncoderVal, "zap-time-encoding", "Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.") +} + +// UseFlagOptions configures the logger to use the Options set by parsing zap option flags from the CLI. +// +// opts := zap.Options{} +// opts.BindFlags(flag.CommandLine) +// flag.Parse() +// log := zap.New(zap.UseFlagOptions(&opts)) +func UseFlagOptions(in *Options) Opts { + return func(o *Options) { + *o = *in + } +} diff --git a/pkg/log/zap/zap_suite_test.go b/pkg/log/zap/zap_suite_test.go new file mode 100644 index 0000000000..43044d8066 --- /dev/null +++ b/pkg/log/zap/zap_suite_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package zap + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Zap Log Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/log/zap/zap_test.go b/pkg/log/zap/zap_test.go new file mode 100644 index 0000000000..2d18476e1f --- /dev/null +++ b/pkg/log/zap/zap_test.go @@ -0,0 +1,602 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package zap + +import ( + "bytes" + "encoding/json" + "flag" + "os" + "reflect" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" +) + +// testStringer is a fmt.Stringer. +type testStringer struct{} + +func (testStringer) String() string { + return "value" +} + +// fakeSyncWriter is a fake zapcore.WriteSyncer that lets us test if sync was called. +type fakeSyncWriter bool + +func (w *fakeSyncWriter) Write(p []byte) (int, error) { + return len(p), nil +} +func (w *fakeSyncWriter) Sync() error { + *w = true + return nil +} + +// logInfo is the information for a particular fakeLogger message. +type logInfo struct { + name []string + tags []interface{} + msg string +} + +// fakeLoggerRoot is the root object to which all fakeLoggers record their messages. +type fakeLoggerRoot struct { + messages []logInfo +} + +var _ logr.LogSink = &fakeLogger{} + +// fakeLogger is a fake implementation of logr.LogSink that records +// messages, tags, and names. +type fakeLogger struct { + name []string + tags []interface{} + + root *fakeLoggerRoot +} + +func (f *fakeLogger) Init(info logr.RuntimeInfo) { +} + +func (f *fakeLogger) WithName(name string) logr.LogSink { + names := append([]string(nil), f.name...) + names = append(names, name) + return &fakeLogger{ + name: names, + tags: f.tags, + root: f.root, + } +} + +func (f *fakeLogger) WithValues(vals ...interface{}) logr.LogSink { + tags := append([]interface{}(nil), f.tags...) + tags = append(tags, vals...) + return &fakeLogger{ + name: f.name, + tags: tags, + root: f.root, + } +} + +func (f *fakeLogger) Error(err error, msg string, vals ...interface{}) { + tags := append([]interface{}(nil), f.tags...) + tags = append(tags, "error", err) + tags = append(tags, vals...) + f.root.messages = append(f.root.messages, logInfo{ + name: append([]string(nil), f.name...), + tags: tags, + msg: msg, + }) +} + +func (f *fakeLogger) Info(level int, msg string, vals ...interface{}) { + tags := append([]interface{}(nil), f.tags...) + tags = append(tags, vals...)
+ f.root.messages = append(f.root.messages, logInfo{ + name: append([]string(nil), f.name...), + tags: tags, + msg: msg, + }) +} + +func (f *fakeLogger) Enabled(level int) bool { return true } +func (f *fakeLogger) V(lvl int) logr.LogSink { return f } + +var _ = Describe("Zap options setup", func() { + var opts *Options + + BeforeEach(func() { + opts = &Options{} + }) + + It("should enable development mode", func() { + UseDevMode(true)(opts) + Expect(opts.Development).To(BeTrue()) + }) + + It("should disable development mode", func() { + UseDevMode(false)(opts) + Expect(opts.Development).To(BeFalse()) + }) + + It("should set a custom writer", func() { + var w fakeSyncWriter + WriteTo(&w)(opts) + Expect(opts.DestWriter).To(Equal(&w)) + }) +}) + +var _ = Describe("Zap logger setup", func() { + Context("when logging kubernetes objects", func() { + var logOut *bytes.Buffer + var logger logr.Logger + + defineTests := func() { + It("should log a standard namespaced Kubernetes object name and namespace", func() { + pod := &corev1.Pod{} + pod.Name = "some-pod" + pod.Namespace = "some-ns" + logger.Info("here's a kubernetes object", "thing", pod) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": pod.Name, + "namespace": pod.Namespace, + })) + }) + + It("should work fine with normal stringers", func() { + logger.Info("here's a non-kubernetes stringer", "thing", testStringer{}) + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", "value")) + }) + + It("should log a standard non-namespaced Kubernetes object name", func() { + node := &corev1.Node{} + node.Name = "some-node" + logger.Info("here's a kubernetes object", "thing", node) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": node.Name, + })) + }) + + It("should log a standard Kubernetes object's kind, if set", func() { + node := &corev1.Node{} + node.Name = "some-node" + node.APIVersion = "v1" + node.Kind = "Node" + logger.Info("here's a kubernetes object", "thing", node) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": node.Name, + "apiVersion": "v1", + "kind": "Node", + })) + }) + + It("should log a standard non-namespaced NamespacedName name", func() { + name := types.NamespacedName{Name: "some-node"} + logger.Info("here's a kubernetes object", "thing", name) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": name.Name, + })) + }) + + It("should log an unstructured Kubernetes object", func() { + pod := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "some-pod", + "namespace": "some-ns", + }, + }, + } + logger.Info("here's a kubernetes object", "thing", pod) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": "some-pod", + "namespace": "some-ns", + })) + }) + + It("should 
log a standard namespaced NamespacedName name and namespace", func() { + name := types.NamespacedName{Name: "some-pod", Namespace: "some-ns"} + logger.Info("here's a kubernetes object", "thing", name) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + })) + }) + + It("should not panic with nil obj", func() { + var pod *corev1.Pod + logger.Info("here's a kubernetes object", "thing", pod) + + outRaw := logOut.Bytes() + Expect(string(outRaw)).Should(ContainSubstring("got nil for runtime.Object")) + }) + } + + Context("with logger created using New", func() { + BeforeEach(func() { + logOut = new(bytes.Buffer) + By("setting up the logger") + // use production settings (false) to get just json output + logger = New(WriteTo(logOut), UseDevMode(false)) + }) + defineTests() + + }) + }) +}) + +var _ = Describe("Zap log level flag options setup", func() { + var ( + fromFlags Options + fs flag.FlagSet + logInfoLevel0 = "info text" + logDebugLevel1 = "debug 1 text" + logDebugLevel2 = "debug 2 text" + logDebugLevel3 = "debug 3 text" + ) + + BeforeEach(func() { + fromFlags = Options{} + fs = *flag.NewFlagSet(os.Args[0], flag.ExitOnError) + }) + + Context("with zap-log-level options provided", func() { + It("Should output logs for info and debug zap-log-level.", func() { + args := []string{"--zap-log-level=debug"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + + outRaw := logOut.Bytes() + + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + + }) + + It("Should output only error logs, otherwise empty logs", func() { + args := []string{"--zap-log-level=error"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + + outRaw := logOut.Bytes() + + Expect(outRaw).To(BeEmpty()) + }) + + }) + + Context("with zap-log-level with increased verbosity.", func() { + It("Should output debug and info log, with default production mode.", func() { + args := []string{"--zap-log-level=1"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + + outRaw := logOut.Bytes() + + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + }) + + It("Should output info and debug logs, with development mode.", func() { + args := []string{"--zap-log-level=1", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + + outRaw := logOut.Bytes() + + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + }) + + It("Should output info, 
and debug logs with increased verbosity, and with development mode set to true.", func() { + args := []string{"--zap-log-level=3", "--zap-devel=false"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + logger.V(2).Info(logDebugLevel2) + logger.V(3).Info(logDebugLevel3) + + outRaw := logOut.Bytes() + + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel2)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel3)) + + }) + It("Should output info, and debug logs with increased verbosity, and with production mode set to true.", func() { + args := []string{"--zap-log-level=3", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + logger.V(2).Info(logDebugLevel2) + logger.V(3).Info(logDebugLevel3) + + outRaw := logOut.Bytes() + + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel2)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel3)) + + }) + + }) + + Context("with zap-stacktrace-level options provided", func() { + + It("Should output stacktrace at info level, with development mode set to true.", func() { + args := []string{"--zap-stacktrace-level=info", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.StacktraceLevel.Enabled(zapcore.InfoLevel)).To(BeTrue()) + }) + + It("Should output stacktrace at error level, with development mode set to true.", func() { + args := []string{"--zap-stacktrace-level=error", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.StacktraceLevel.Enabled(zapcore.ErrorLevel)).To(BeTrue()) + }) + + It("Should output stacktrace at panic level, with development mode set to true.", func() { + args := []string{"--zap-stacktrace-level=panic", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.StacktraceLevel.Enabled(zapcore.PanicLevel)).To(BeTrue()) + Expect(out.StacktraceLevel.Enabled(zapcore.ErrorLevel)).To(BeFalse()) + Expect(out.StacktraceLevel.Enabled(zapcore.InfoLevel)).To(BeFalse()) + }) + + }) + + Context("with only -zap-devel flag provided", func() { + It("Should set dev=true.", func() { + args := []string{"--zap-devel=true"} + fromFlags.BindFlags(&fs) + if err := fs.Parse(args); err != nil { + Expect(err).ToNot(HaveOccurred()) + } + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.Development).To(BeTrue()) + Expect(out.Encoder).To(BeNil()) + Expect(out.Level).To(BeNil()) + Expect(out.StacktraceLevel).To(BeNil()) + Expect(out.EncoderConfigOptions).To(BeNil()) + }) + It("Should set dev=false", func() { + args := []string{"--zap-devel=false"} + fromFlags.BindFlags(&fs) + if 
err := fs.Parse(args); err != nil { + Expect(err).ToNot(HaveOccurred()) + } + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.Development).To(BeFalse()) + Expect(out.Encoder).To(BeNil()) + Expect(out.Level).To(BeNil()) + Expect(out.StacktraceLevel).To(BeNil()) + Expect(out.EncoderConfigOptions).To(BeNil()) + + }) + }) + + Context("with zap-time-encoding flag provided", func() { + + It("Should set time encoder in options", func() { + args := []string{"--zap-time-encoding=rfc3339"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + + opt := Options{} + UseFlagOptions(&fromFlags)(&opt) + opt.addDefaults() + + optVal := reflect.ValueOf(opt.TimeEncoder) + expVal := reflect.ValueOf(zapcore.RFC3339TimeEncoder) + + Expect(optVal.Pointer()).To(Equal(expVal.Pointer())) + }) + + It("Should default to 'epoch' time encoding", func() { + args := []string{""} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + + opt := Options{} + UseFlagOptions(&fromFlags)(&opt) + opt.addDefaults() + + optVal := reflect.ValueOf(opt.TimeEncoder) + expVal := reflect.ValueOf(zapcore.EpochTimeEncoder) + + Expect(optVal.Pointer()).To(Equal(expVal.Pointer())) + }) + + It("Should return an error message, with unknown time-encoding", func() { + fs = *flag.NewFlagSet(os.Args[0], flag.ContinueOnError) + args := []string{"--zap-time-encoding=foobar"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).To(HaveOccurred()) + }) + + It("Should propagate time encoder to logger", func() { + // zaps ISO8601TimeEncoder uses 2006-01-02T15:04:05.000Z0700 as pattern for iso8601 encoding + iso8601Pattern := `^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}([-+][0-9]{4}|Z)` + + args := []string{"--zap-time-encoding=iso8601"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.Info("This is a test message") + + outRaw := logOut.Bytes() + + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + Expect(res["ts"]).Should(MatchRegexp(iso8601Pattern)) + }) + + }) + + Context("with encoder options provided programmatically", func() { + + It("Should set JSON Encoder, with given Millis TimeEncoder option, and MessageKey", func() { + logOut := new(bytes.Buffer) + f := func(ec *zapcore.EncoderConfig) { + ec.MessageKey = "MillisTimeFormat" + } + opts := func(o *Options) { + o.EncoderConfigOptions = append(o.EncoderConfigOptions, f) + } + log := New(UseDevMode(false), WriteTo(logOut), opts) + log.Info("This is a test message") + outRaw := logOut.Bytes() + // Assert for JSON Encoder + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + // Assert for MessageKey + Expect(string(outRaw)).Should(ContainSubstring("MillisTimeFormat")) + }) + + Context("using Level()", func() { + var logOut *bytes.Buffer + + BeforeEach(func() { + logOut = new(bytes.Buffer) + }) + + It("logs with negative logr level", func() { + By("setting up the logger") + logger := New(WriteTo(logOut), Level(zapcore.Level(-3))) + logger.V(3).Info("test 3") // Should be logged + Expect(logOut.String()).To(ContainSubstring(`"msg":"test 3"`)) + logOut.Truncate(0) + logger.V(1).Info("test 1") // Should be logged + Expect(logOut.String()).To(ContainSubstring(`"msg":"test 1"`)) + logOut.Truncate(0) + logger.V(4).Info("test 4") // Should not be logged + 
Expect(logOut.String()).To(BeEmpty()) + logger.V(-3).Info("test -3") + Expect(logOut.String()).To(ContainSubstring("test -3")) + }) + It("does not log with positive logr level", func() { + By("setting up the logger") + logger := New(WriteTo(logOut), Level(zapcore.Level(1))) + logger.V(1).Info("test 1") + Expect(logOut.String()).To(BeEmpty()) + logger.V(3).Info("test 3") + Expect(logOut.String()).To(BeEmpty()) + }) + }) + }) +}) diff --git a/pkg/manager/doc.go b/pkg/manager/doc.go new file mode 100644 index 0000000000..f2976c7f75 --- /dev/null +++ b/pkg/manager/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package manager is required to create Controllers and provides shared dependencies such as clients, caches, schemes, +etc. Controllers must be started by calling Manager.Start. +*/ +package manager diff --git a/pkg/manager/example_test.go b/pkg/manager/example_test.go new file mode 100644 index 0000000000..17557d1817 --- /dev/null +++ b/pkg/manager/example_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager_test + +import ( + "context" + "os" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client/config" + conf "sigs.k8s.io/controller-runtime/pkg/config" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" +) + +var ( + mgr manager.Manager + // NB: don't call SetLogger in init(), or else you'll mess up logging in the main suite. + log = logf.Log.WithName("manager-examples") +) + +// This example creates a new Manager that can be used with controller.New to create Controllers. +func ExampleNew() { + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + + mgr, err := manager.New(cfg, manager.Options{}) + if err != nil { + log.Error(err, "unable to set up manager") + os.Exit(1) + } + log.Info("created manager", "manager", mgr) +} + +// This example creates a new Manager that has a cache scoped to a list of namespaces. 
+func ExampleNew_multinamespaceCache() { + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + + mgr, err := manager.New(cfg, manager.Options{ + NewCache: cache.MultiNamespacedCacheBuilder([]string{"namespace1", "namespace2"}), + }) + if err != nil { + log.Error(err, "unable to set up manager") + os.Exit(1) + } + log.Info("created manager", "manager", mgr) +} + +// This example adds a Runnable for the Manager to Start. +func ExampleManager_add() { + err := mgr.Add(manager.RunnableFunc(func(context.Context) error { + // Do something + return nil + })) + if err != nil { + log.Error(err, "unable to add a runnable to the manager") + os.Exit(1) + } +} + +// This example starts a Manager that has had Runnables added. +func ExampleManager_start() { + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "unable to start the manager") + os.Exit(1) + } +} + +// This example will populate Options from a custom config file +// using defaults. +func ExampleOptions_andFrom() { + opts := manager.Options{} + if _, err := opts.AndFrom(conf.File()); err != nil { + log.Error(err, "unable to load config") + os.Exit(1) + } + + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + + mgr, err := manager.New(cfg, opts) + if err != nil { + log.Error(err, "unable to set up manager") + os.Exit(1) + } + log.Info("created manager", "manager", mgr) +} + +// This example will populate Options from a custom config file +// using defaults and will panic if there are errors. +func ExampleOptions_andFromOrDie() { + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + + mgr, err := manager.New(cfg, manager.Options{}.AndFromOrDie(conf.File())) + if err != nil { + log.Error(err, "unable to set up manager") + os.Exit(1) + } + log.Info("created manager", "manager", mgr) +} diff --git a/pkg/manager/internal.go b/pkg/manager/internal.go new file mode 100644 index 0000000000..5b22c628f9 --- /dev/null +++ b/pkg/manager/internal.go @@ -0,0 +1,652 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package manager + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +const ( + // Values taken from: https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go + defaultLeaseDuration = 15 * time.Second + defaultRenewDeadline = 10 * time.Second + defaultRetryPeriod = 2 * time.Second + defaultGracefulShutdownPeriod = 30 * time.Second + + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" + defaultMetricsEndpoint = "/metrics" +) + +var _ Runnable = &controllerManager{} + +type controllerManager struct { + sync.Mutex + started bool + + stopProcedureEngaged *int64 + errChan chan error + runnables *runnables + + // cluster holds a variety of methods to interact with a cluster. Required. + cluster cluster.Cluster + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). + recorderProvider *intrec.Provider + + // resourceLock forms the basis for leader election + resourceLock resourcelock.Interface + + // leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease + // on shutdown + leaderElectionReleaseOnCancel bool + + // metricsListener is used to serve prometheus metrics + metricsListener net.Listener + + // metricsExtraHandlers contains extra handlers to register on http server that serves metrics. + metricsExtraHandlers map[string]http.Handler + + // healthProbeListener is used to serve liveness probe + healthProbeListener net.Listener + + // Readiness probe endpoint name + readinessEndpointName string + + // Liveness probe endpoint name + livenessEndpointName string + + // Readyz probe handler + readyzHandler *healthz.Handler + + // Healthz probe handler + healthzHandler *healthz.Handler + + // controllerOptions are the global controller options. + controllerOptions v1alpha1.ControllerConfigurationSpec + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. + logger logr.Logger + + // leaderElectionStopped is an internal channel used to signal the stopping procedure that the + // LeaderElection.Run(...) function has returned and the shutdown can proceed. + leaderElectionStopped chan struct{} + + // leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper, + // because for safety reasons we need to os.Exit() when we lose the leader election, meaning that + // it must be deferred until after gracefulShutdown is done. 
+ leaderElectionCancel context.CancelFunc + + // elected is closed when this manager becomes the leader of a group of + // managers, either because it won a leader election or because no leader + // election was configured. + elected chan struct{} + + // port is the port that the webhook server serves at. + port int + // host is the hostname that the webhook server binds to. + host string + // CertDir is the directory that contains the server key and certificate. + // If not set, the webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs + certDir string + + webhookServer *webhook.Server + // webhookServerOnce will be called in GetWebhookServer() to optionally initialize + // webhookServer if unset, and Add() it to controllerManager. + webhookServerOnce sync.Once + + // leaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. + leaseDuration time.Duration + // renewDeadline is the duration that the acting control plane will retry + // refreshing leadership before giving up. + renewDeadline time.Duration + // retryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. + retryPeriod time.Duration + + // gracefulShutdownTimeout is the duration given to runnables to stop + // before the manager actually returns on stop. + gracefulShutdownTimeout time.Duration + + // onStoppedLeading is called when the leader election lease is lost. + // It can be overridden for tests. + onStoppedLeading func() + + // shutdownCtx is the context that can be used during shutdown. It will be cancelled + // after the gracefulShutdownTimeout ended. It must not be accessed before internalStop + // is closed because it will be nil. + shutdownCtx context.Context + + internalCtx context.Context + internalCancel context.CancelFunc + + // internalProceduresStop channel is used internally to the manager when coordinating + // the proper shutdown of servers. This channel is also used for dependency injection. + internalProceduresStop chan struct{} +} + +type hasCache interface { + Runnable + GetCache() cache.Cache +} + +// Add sets dependencies on r, and adds it to the list of Runnables to start. +func (cm *controllerManager) Add(r Runnable) error { + cm.Lock() + defer cm.Unlock() + return cm.add(r) +} + +func (cm *controllerManager) add(r Runnable) error { + // Set dependencies on the object + if err := cm.SetFields(r); err != nil { + return err + } + return cm.runnables.Add(r) +} + +// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. +func (cm *controllerManager) SetFields(i interface{}) error { + if err := cm.cluster.SetFields(i); err != nil { + return err + } + if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { + return err + } + if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { + return err + } + if _, err := inject.LoggerInto(cm.logger, i); err != nil { + return err + } + + return nil +} + +// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
+func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") + } + + if path == defaultMetricsEndpoint { + return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + + if _, found := cm.metricsExtraHandlers[path]; found { + return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + } + + cm.metricsExtraHandlers[path] = handler + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) + return nil +} + +// AddHealthzCheck allows you to add Healthz checker. +func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") + } + + if cm.healthzHandler == nil { + cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.healthzHandler.Checks[name] = check + return nil +} + +// AddReadyzCheck allows you to add Readyz checker. +func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") + } + + if cm.readyzHandler == nil { + cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.readyzHandler.Checks[name] = check + return nil +} + +func (cm *controllerManager) GetConfig() *rest.Config { + return cm.cluster.GetConfig() +} + +func (cm *controllerManager) GetClient() client.Client { + return cm.cluster.GetClient() +} + +func (cm *controllerManager) GetScheme() *runtime.Scheme { + return cm.cluster.GetScheme() +} + +func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer { + return cm.cluster.GetFieldIndexer() +} + +func (cm *controllerManager) GetCache() cache.Cache { + return cm.cluster.GetCache() +} + +func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder { + return cm.cluster.GetEventRecorderFor(name) +} + +func (cm *controllerManager) GetRESTMapper() meta.RESTMapper { + return cm.cluster.GetRESTMapper() +} + +func (cm *controllerManager) GetAPIReader() client.Reader { + return cm.cluster.GetAPIReader() +} + +func (cm *controllerManager) GetWebhookServer() *webhook.Server { + cm.webhookServerOnce.Do(func() { + if cm.webhookServer == nil { + cm.webhookServer = &webhook.Server{ + Port: cm.port, + Host: cm.host, + CertDir: cm.certDir, + } + } + if err := cm.Add(cm.webhookServer); err != nil { + panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) + } + }) + return cm.webhookServer +} + +func (cm *controllerManager) GetLogger() logr.Logger { + return cm.logger +} + +func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { + return cm.controllerOptions +} + +func (cm *controllerManager) serveMetrics() { + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ + ErrorHandling: promhttp.HTTPErrorOnError, + }) + // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics + mux := http.NewServeMux() + mux.Handle(defaultMetricsEndpoint, handler) + for path, extraHandler := range cm.metricsExtraHandlers { + mux.Handle(path, extraHandler) + } + + server := 
httpserver.New(mux) + go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener) +} + +func (cm *controllerManager) serveHealthProbes() { + mux := http.NewServeMux() + server := httpserver.New(mux) + + if cm.readyzHandler != nil { + mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + } + if cm.healthzHandler != nil { + mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + } + + go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) +} + +func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { + log = log.WithValues("kind", kind, "addr", ln.Addr()) + + go func() { + log.Info("Starting server") + if err := server.Serve(ln); err != nil { + if errors.Is(err, http.ErrServerClosed) { + return + } + if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 { + // There might be cases where connections are still open and we try to shut down, + // but not having enough time to close the connection causes an error in Serve. + // + // In that case we want to avoid returning an error to the main error channel. + log.Error(err, "error on Serve after stop has been engaged") + return + } + cm.errChan <- err + } + }() + + // Shutdown the server when stop is closed. + <-cm.internalProceduresStop + if err := server.Shutdown(cm.shutdownCtx); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // Avoid logging context related errors. + return + } + if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 { + cm.logger.Error(err, "error on Shutdown after stop has been engaged") + return + } + cm.errChan <- err + } +} + +// Start starts the manager and waits indefinitely. +// There are only two ways to have Start return: +// An error has occurred in one of the internal operations, +// such as leader election, cache start, webhooks, and so on. +// Or, the context is cancelled. +func (cm *controllerManager) Start(ctx context.Context) (err error) { + cm.Lock() + if cm.started { + cm.Unlock() + return errors.New("manager already started") + } + var ready bool + defer func() { + // Only unlock the manager if we haven't reached + // the internal readiness condition. + if !ready { + cm.Unlock() + } + }() + + // Initialize the internal context. + cm.internalCtx, cm.internalCancel = context.WithCancel(ctx) + + // This chan indicates that stop is complete, in other words all runnables have returned or timed out on the stop request + stopComplete := make(chan struct{}) + defer close(stopComplete) + // This must be deferred after closing stopComplete, otherwise we deadlock. + defer func() { + // https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg + stopErr := cm.engageStopProcedure(stopComplete) + if stopErr != nil { + if err != nil { + // Utilerrors.Aggregate allows using errors.Is for all contained errors + // whereas fmt.Errorf allows wrapping at most one error which means the + // other one can not be found anymore. + err = kerrors.NewAggregate([]error{err, stopErr}) + } else { + err = stopErr + } + } + }() + + // Add the cluster runnable.
+ if err := cm.add(cm.cluster); err != nil { + return fmt.Errorf("failed to add cluster to runnables: %w", err) + } + + // Metrics should be served whether the controller is leader or not. + // (If we don't serve metrics for non-leaders, prometheus will still scrape + // the pod but will get a connection refused). + if cm.metricsListener != nil { + cm.serveMetrics() + } + + // Serve health probes. + if cm.healthProbeListener != nil { + cm.serveHealthProbes() + } + + // First start any webhook servers, which includes conversion, validation, and defaulting + // webhooks that are registered. + // + // WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition + // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks + // to never start because no cache can be populated. + if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { + if !errors.Is(err, wait.ErrWaitTimeout) { + return err + } + } + + // Start and wait for caches. + if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { + if !errors.Is(err, wait.ErrWaitTimeout) { + return err + } + } + + // Start the non-leaderelection Runnables after the cache has synced. + if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { + if !errors.Is(err, wait.ErrWaitTimeout) { + return err + } + } + + // Start the leader election and all required runnables. + { + ctx, cancel := context.WithCancel(context.Background()) + cm.leaderElectionCancel = cancel + go func() { + if cm.resourceLock != nil { + if err := cm.startLeaderElection(ctx); err != nil { + cm.errChan <- err + } + } else { + // Treat not having leader election enabled the same as being elected. + if err := cm.startLeaderElectionRunnables(); err != nil { + cm.errChan <- err + } + close(cm.elected) + } + }() + } + + ready = true + cm.Unlock() + select { + case <-ctx.Done(): + // We are done + return nil + case err := <-cm.errChan: + // Error starting or running a runnable + return err + } +} + +// engageStopProcedure signals all runnables to stop, reads potential errors +// from the errChan and waits for them to end. It must not be called more than once. +func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error { + if !atomic.CompareAndSwapInt64(cm.stopProcedureEngaged, 0, 1) { + return errors.New("stop procedure already engaged") + } + + // Populate the shutdown context, this operation MUST be done before + // closing the internalProceduresStop channel. + // + // The shutdown context immediately expires if the gracefulShutdownTimeout is not set. + var shutdownCancel context.CancelFunc + cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout) + defer shutdownCancel() + + // Start draining the errors before acquiring the lock to make sure we don't deadlock + // if something that has the lock is blocked on trying to write into the unbuffered + // channel after something else already wrote into it. + var closeOnce sync.Once + go func() { + for { + // Closing in the for loop is required to avoid race conditions between + // the closure of all internal procedures and making sure to have a reader off the error channel. + closeOnce.Do(func() { + // Cancel the internal stop channel and wait for the procedures to stop and complete. 
+ close(cm.internalProceduresStop) + cm.internalCancel() + }) + select { + case err, ok := <-cm.errChan: + if ok { + cm.logger.Error(err, "error received after stop sequence was engaged") + } + case <-stopComplete: + return + } + } + }() + + // We want to close this after the other runnables stop, because we don't + // want things like leader election to try and emit events on a closed + // channel + defer cm.recorderProvider.Stop(cm.shutdownCtx) + defer func() { + // Cancel leader election only after we waited. It will os.Exit() the app for safety. + if cm.resourceLock != nil { + // After asking the context to be cancelled, make sure + // we wait for the leader stopped channel to be closed, otherwise + // we might encounter race conditions between this code + // and the event recorder, which is used within leader election code. + cm.leaderElectionCancel() + <-cm.leaderElectionStopped + } + }() + + go func() { + // First stop the non-leader election runnables. + cm.logger.Info("Stopping and waiting for non leader election runnables") + cm.runnables.Others.StopAndWait(cm.shutdownCtx) + + // Stop all the leader election runnables, which includes reconcilers. + cm.logger.Info("Stopping and waiting for leader election runnables") + cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx) + + // Stop the caches before the leader election runnables, this is an important + // step to make sure that we don't race with the reconcilers by receiving more events + // from the API servers and enqueueing them. + cm.logger.Info("Stopping and waiting for caches") + cm.runnables.Caches.StopAndWait(cm.shutdownCtx) + + // Webhooks should come last, as they might be still serving some requests. + cm.logger.Info("Stopping and waiting for webhooks") + cm.runnables.Webhooks.StopAndWait(cm.shutdownCtx) + + // Proceed to close the manager and overall shutdown context. + cm.logger.Info("Wait completed, proceeding to shutdown the manager") + shutdownCancel() + }() + + <-cm.shutdownCtx.Done() + if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) { + if errors.Is(err, context.DeadlineExceeded) { + if cm.gracefulShutdownTimeout > 0 { + return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err) + } + return nil + } + // For any other error, return the error. + return err + } + + return nil +} + +func (cm *controllerManager) startLeaderElectionRunnables() error { + return cm.runnables.LeaderElection.Start(cm.internalCtx) +} + +func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error) { + l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ + Lock: cm.resourceLock, + LeaseDuration: cm.leaseDuration, + RenewDeadline: cm.renewDeadline, + RetryPeriod: cm.retryPeriod, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(_ context.Context) { + if err := cm.startLeaderElectionRunnables(); err != nil { + cm.errChan <- err + return + } + close(cm.elected) + }, + OnStoppedLeading: func() { + if cm.onStoppedLeading != nil { + cm.onStoppedLeading() + } + // Make sure graceful shutdown is skipped if we lost the leader lock without + // intending to. + cm.gracefulShutdownTimeout = time.Duration(0) + // Most implementations of leader election log.Fatal() here. + // Since Start is wrapped in log.Fatal when called, we can just return + // an error here which will cause the program to exit. 
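
Losing the lock therefore zeroes the grace period and surfaces an error; for planned shutdowns the two knobs involved are ordinary Options fields. A minimal sketch of setting them, with example values rather than recommendations:

```go
package example

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

func newManagerOptions() ctrl.Options {
	// Example value: wait up to 30s for runnables to stop on a clean shutdown.
	// Zero disables the grace period, a negative value waits indefinitely.
	grace := 30 * time.Second

	return ctrl.Options{
		GracefulShutdownTimeout: &grace,
		// Step down from leadership voluntarily on shutdown so the next leader
		// does not have to wait out LeaseDuration. Only safe if the binary
		// exits immediately after Start returns.
		LeaderElectionReleaseOnCancel: true,
	}
}
```
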
+ cm.errChan <- errors.New("leader election lost") + }, + }, + ReleaseOnCancel: cm.leaderElectionReleaseOnCancel, + }) + if err != nil { + return err + } + + // Start the leader elector process + go func() { + l.Run(ctx) + <-ctx.Done() + close(cm.leaderElectionStopped) + }() + return nil +} + +func (cm *controllerManager) Elected() <-chan struct{} { + return cm.elected +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go new file mode 100644 index 0000000000..53716aa9fa --- /dev/null +++ b/pkg/manager/manager.go @@ -0,0 +1,638 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "fmt" + "net" + "net/http" + "reflect" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/healthz" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/leaderelection" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager interface { + // Cluster holds a variety of methods to interact with a cluster. + cluster.Cluster + + // Add will set requested dependencies on the component, and cause the component to be + // started when Start is called. Add will inject any dependencies for which the argument + // implements the inject interface - e.g. inject.Client. + // Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either + // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). + Add(Runnable) error + + // Elected is closed when this manager is elected leader of a group of + // managers, either because it won a leader election or because no leader + // election was configured. + Elected() <-chan struct{} + + // AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics. + // Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints meant to be + // sensitive and shouldn't be exposed publicly. + // If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as + // Runnable to the manager via Add method. 
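
As an illustration of that hook (whose signature follows), here is a hypothetical helper that mounts pprof on the metrics server; the paths and the helper name are examples, and such endpoints must stay private.

```go
package example

import (
	"net/http"
	"net/http/pprof"

	ctrl "sigs.k8s.io/controller-runtime"
)

// addPprofHandlers is a hypothetical helper that exposes pprof handlers on
// the metrics HTTP server (the one bound to MetricsBindAddress).
func addPprofHandlers(mgr ctrl.Manager) error {
	if err := mgr.AddMetricsExtraHandler("/debug/pprof/", http.HandlerFunc(pprof.Index)); err != nil {
		return err
	}
	return mgr.AddMetricsExtraHandler("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
}
```
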
+	AddMetricsExtraHandler(path string, handler http.Handler) error
+
+	// AddHealthzCheck allows you to add a Healthz checker
+	AddHealthzCheck(name string, check healthz.Checker) error
+
+	// AddReadyzCheck allows you to add a Readyz checker
+	AddReadyzCheck(name string, check healthz.Checker) error
+
+	// Start starts all registered Controllers and blocks until the context is cancelled.
+	// Returns an error if there is an error starting any controller.
+	//
+	// If LeaderElection is used, the binary must be exited immediately after this returns,
+	// otherwise components that need leader election might continue to run after the leader
+	// lock was lost.
+	Start(ctx context.Context) error
+
+	// GetWebhookServer returns a webhook.Server
+	GetWebhookServer() *webhook.Server
+
+	// GetLogger returns this manager's logger.
+	GetLogger() logr.Logger
+
+	// GetControllerOptions returns controller global configuration options.
+	GetControllerOptions() v1alpha1.ControllerConfigurationSpec
+}
+
+// Options are the arguments for creating a new Manager.
+type Options struct {
+	// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources.
+	// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
+	// to pass your own scheme in. See the documentation in pkg/scheme for more information.
+	Scheme *runtime.Scheme
+
+	// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
+	MapperProvider func(c *rest.Config) (meta.RESTMapper, error)
+
+	// SyncPeriod determines the minimum frequency at which watched resources are
+	// reconciled. A lower period will correct entropy more quickly, but reduce
+	// responsiveness to change if there are many watched resources. Change this
+	// value only if you know what you are doing. Defaults to 10 hours if unset.
+	// There will be a 10 percent jitter between the SyncPeriod of all controllers
+	// so that all controllers will not send list requests simultaneously.
+	//
+	// This applies to all controllers.
+	//
+	// A periodic sync happens for two reasons:
+	// 1. To insure against a bug in the controller that causes an object to not
+	// be requeued, when it otherwise should be requeued.
+	// 2. To insure against an unknown bug in controller-runtime, or its dependencies,
+	// that causes an object to not be requeued, when it otherwise should be
+	// requeued, or to be removed from the queue, when it otherwise should not
+	// be removed.
+	//
+	// If you want
+	// 1. to insure against missed watch events, or
+	// 2. to poll services that cannot be watched,
+	// then we recommend that, instead of changing the default period, the
+	// controller requeue, with a constant duration `t`, whenever the controller
+	// is "done" with an object, and would otherwise not requeue it, i.e., we
+	// recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
+	// instead of `reconcile.Result{}`.
+	SyncPeriod *time.Duration
+
+	// Logger is the logger that should be used by this manager.
+	// If none is set, it defaults to the log.Log global logger.
+	Logger logr.Logger
+
+	// LeaderElection determines whether or not to use leader election when
+	// starting the manager.
+	LeaderElection bool
+
+	// LeaderElectionResourceLock determines which resource lock to use for leader election,
+	// defaults to "leases". Change this value only if you know what you are doing.
+ // + // If you are using `configmaps`/`endpoints` resource lock and want to migrate to "leases", + // you might do so by migrating to the respective multilock first ("configmapsleases" or "endpointsleases"), + // which will acquire a leader lock on both resources. + // After all your users have migrated to the multilock, you can go ahead and migrate to "leases". + // Please also keep in mind, that users might skip versions of your controller. + // + // Note: before controller-runtime version v0.7, it was set to "configmaps". + // And from v0.7 to v0.11, the default was "configmapsleases", which was + // used to migrate from configmaps to leases. + // Since the default was "configmapsleases" for over a year, spanning five minor releases, + // any actively maintained operators are very likely to have a released version that uses + // "configmapsleases". Therefore defaulting to "leases" should be safe since v0.12. + // + // So, what do you have to do when you are updating your controller-runtime dependency + // from a lower version to v0.12 or newer? + // - If your operator matches at least one of these conditions: + // - the LeaderElectionResourceLock in your operator has already been explicitly set to "leases" + // - the old controller-runtime version is between v0.7.0 and v0.11.x and the + // LeaderElectionResourceLock wasn't set or was set to "leases"/"configmapsleases"/"endpointsleases" + // feel free to update controller-runtime to v0.12 or newer. + // - Otherwise, you may have to take these steps: + // 1. update controller-runtime to v0.12 or newer in your go.mod + // 2. set LeaderElectionResourceLock to "configmapsleases" (or "endpointsleases") + // 3. package your operator and upgrade it in all your clusters + // 4. only if you have finished 3, you can remove the LeaderElectionResourceLock to use the default "leases" + // Otherwise, your operator might end up with multiple running instances that + // each acquired leadership through different resource locks during upgrades and thus + // act on the same resources concurrently. + LeaderElectionResourceLock string + + // LeaderElectionNamespace determines the namespace in which the leader + // election resource will be created. + LeaderElectionNamespace string + + // LeaderElectionID determines the name of the resource that leader election + // will use for holding the leader lock. + LeaderElectionID string + + // LeaderElectionConfig can be specified to override the default configuration + // that is used to build the leader election client. + LeaderElectionConfig *rest.Config + + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + LeaderElectionReleaseOnCancel bool + + // LeaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. This is measured against time of + // last observed ack. Default is 15 seconds. + LeaseDuration *time.Duration + // RenewDeadline is the duration that the acting controlplane will retry + // refreshing leadership before giving up. Default is 10 seconds. + RenewDeadline *time.Duration + // RetryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. Default is 2 seconds. 
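
A sketch pulling the leader election fields documented above together (the RetryPeriod field itself is declared right after this block). The ID and namespace are placeholders, and the durations simply restate the documented defaults:

```go
package example

import (
	"time"

	"k8s.io/client-go/tools/leaderelection/resourcelock"
	ctrl "sigs.k8s.io/controller-runtime"
)

func leaderElectionOptions() ctrl.Options {
	leaseDuration := 15 * time.Second
	renewDeadline := 10 * time.Second
	retryPeriod := 2 * time.Second

	return ctrl.Options{
		LeaderElection:             true,
		LeaderElectionID:           "my-operator-lock",   // placeholder
		LeaderElectionNamespace:    "my-operator-system", // placeholder
		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
		LeaseDuration:              &leaseDuration,
		RenewDeadline:              &renewDeadline,
		RetryPeriod:                &retryPeriod,
	}
}
```
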
+ RetryPeriod *time.Duration + + // Namespace, if specified, restricts the manager's cache to watch objects in + // the desired namespace. Defaults to all namespaces. + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources, the cache + // will only hold objects from the desired namespace. + Namespace string + + // MetricsBindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + MetricsBindAddress string + + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + HealthProbeBindAddress string + + // Readiness probe endpoint name, defaults to "readyz" + ReadinessEndpointName string + + // Liveness probe endpoint name, defaults to "healthz" + LivenessEndpointName string + + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port if WebhookServer is not set. + Port int + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host if WebhookServer is not set. + Host string + + // CertDir is the directory that contains the server key and certificate. + // If not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate + // must be named tls.key and tls.crt, respectively. + // It is used to set webhook.Server.CertDir if WebhookServer is not set. + CertDir string + + // WebhookServer is an externally configured webhook.Server. By default, + // a Manager will create a default server using Port, Host, and CertDir; + // if this is set, the Manager will use this server instead. + WebhookServer *webhook.Server + + // Functions to allow for a user to customize values that will be injected. + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + NewCache cache.NewCacheFunc + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create the default DelegatingClient that will + // use the cache for reads and the client for writes. + NewClient cluster.NewClientFunc + + // BaseContext is the function that provides Context values to Runnables + // managed by the Manager. If a BaseContext function isn't provided, Runnables + // will receive a new Background Context instead. + BaseContext BaseContextFunc + + // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it + // for the given objects. + ClientDisableCacheFor []client.Object + + // DryRunClient specifies whether the client should be configured to enforce + // dryRun mode. + DryRunClient bool + + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. + EventBroadcaster record.EventBroadcaster + + // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. + // To disable graceful shutdown, set to time.Duration(0) + // To use graceful shutdown without timeout, set to a negative duration, e.G. 
time.Duration(-1) + // The graceful shutdown is skipped for safety reasons in case the leader election lease is lost. + GracefulShutdownTimeout *time.Duration + + // Controller contains global configuration options for controllers + // registered within this manager. + // +optional + Controller v1alpha1.ControllerConfigurationSpec + + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. + makeBroadcaster intrec.EventBroadcasterProducer + + // Dependency injection for testing + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) + newMetricsListener func(addr string) (net.Listener, error) + newHealthProbeListener func(addr string) (net.Listener, error) +} + +// BaseContextFunc is a function used to provide a base Context to Runnables +// managed by a Manager. +type BaseContextFunc func() context.Context + +// Runnable allows a component to be started. +// It's very important that Start blocks until +// it's done running. +type Runnable interface { + // Start starts running the component. The component will stop running + // when the context is closed. Start blocks until the context is closed or + // an error occurs. + Start(context.Context) error +} + +// RunnableFunc implements Runnable using a function. +// It's very important that the given function block +// until it's done running. +type RunnableFunc func(context.Context) error + +// Start implements Runnable. +func (r RunnableFunc) Start(ctx context.Context) error { + return r(ctx) +} + +// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode. +type LeaderElectionRunnable interface { + // NeedLeaderElection returns true if the Runnable needs to be run in the leader election mode. + // e.g. controllers need to be run in leader election mode, while webhook server doesn't. + NeedLeaderElection() bool +} + +// New returns a new Manager for creating Controllers. +func New(config *rest.Config, options Options) (Manager, error) { + // Set default values for options fields + options = setOptionsDefaults(options) + + cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) { + clusterOptions.Scheme = options.Scheme + clusterOptions.MapperProvider = options.MapperProvider + clusterOptions.Logger = options.Logger + clusterOptions.SyncPeriod = options.SyncPeriod + clusterOptions.Namespace = options.Namespace + clusterOptions.NewCache = options.NewCache + clusterOptions.NewClient = options.NewClient + clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor + clusterOptions.DryRunClient = options.DryRunClient + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + }) + if err != nil { + return nil, err + } + + // Create the recorder provider to inject event recorders for the components. + // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific + // to the particular controller that it's being injected into, rather than a generic one like is here. 
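
Stepping back from New for a moment, the Runnable and LeaderElectionRunnable contracts defined above are easiest to see in a short sketch: a hypothetical component that opts out of leader election (so it runs on every replica, like the webhook server) and blocks until its context is cancelled. It would be registered with `mgr.Add(metricsPusher{})`.

```go
package example

import (
	"context"
	"time"
)

// metricsPusher is a hypothetical component added via mgr.Add(...).
type metricsPusher struct{}

// Start blocks until the context is done, as the Runnable contract requires.
func (metricsPusher) Start(ctx context.Context) error {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			// push metrics somewhere (omitted)
		}
	}
}

// NeedLeaderElection returning false means the runnable starts on every
// replica instead of waiting for this manager to become leader.
func (metricsPusher) NeedLeaderElection() bool { return false }
```
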
+ recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + + // Create the resource lock to enable leader election) + var leaderConfig *rest.Config + var leaderRecorderProvider *intrec.Provider + + if options.LeaderElectionConfig == nil { + leaderConfig = rest.CopyConfig(config) + leaderRecorderProvider = recorderProvider + } else { + leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + } + + resourceLock, err := options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{ + LeaderElection: options.LeaderElection, + LeaderElectionResourceLock: options.LeaderElectionResourceLock, + LeaderElectionID: options.LeaderElectionID, + LeaderElectionNamespace: options.LeaderElectionNamespace, + }) + if err != nil { + return nil, err + } + + // Create the metrics listener. This will throw an error if the metrics bind + // address is invalid or already in use. + metricsListener, err := options.newMetricsListener(options.MetricsBindAddress) + if err != nil { + return nil, err + } + + // By default we have no extra endpoints to expose on metrics http server. + metricsExtraHandlers := make(map[string]http.Handler) + + // Create health probes listener. This will throw an error if the bind + // address is invalid or already in use. + healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress) + if err != nil { + return nil, err + } + + errChan := make(chan error) + runnables := newRunnables(options.BaseContext, errChan) + + return &controllerManager{ + stopProcedureEngaged: pointer.Int64(0), + cluster: cluster, + runnables: runnables, + errChan: errChan, + recorderProvider: recorderProvider, + resourceLock: resourceLock, + metricsListener: metricsListener, + metricsExtraHandlers: metricsExtraHandlers, + controllerOptions: options.Controller, + logger: options.Logger, + elected: make(chan struct{}), + port: options.Port, + host: options.Host, + certDir: options.CertDir, + webhookServer: options.WebhookServer, + leaseDuration: *options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, + healthProbeListener: healthProbeListener, + readinessEndpointName: options.ReadinessEndpointName, + livenessEndpointName: options.LivenessEndpointName, + gracefulShutdownTimeout: *options.GracefulShutdownTimeout, + internalProceduresStop: make(chan struct{}), + leaderElectionStopped: make(chan struct{}), + leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel, + }, nil +} + +// AndFrom will use a supplied type and convert to Options +// any options already set on Options will be ignored, this is used to allow +// cli flags to override anything specified in the config file. 
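
In practice that flow looks like the sketch below (the implementation follows): anything already set on Options wins, and the rest is filled in from the on-disk component config. The file path is an example.

```go
package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/config"
)

func optionsFromFile() (ctrl.Options, error) {
	opts := ctrl.Options{
		// Anything set here takes precedence over the file contents.
		LeaderElectionID: "my-operator-lock", // placeholder
	}
	// "config.yaml" is an example path; OfKind can be chained to decode
	// custom fields, as the options test in this change demonstrates.
	return opts.AndFrom(config.File().AtPath("config.yaml"))
}
```
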
+func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { + if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { + err := inj.InjectScheme(o.Scheme) + if err != nil { + return o, err + } + } + + newObj, err := loader.Complete() + if err != nil { + return o, err + } + + o = o.setLeaderElectionConfig(newObj) + + if o.SyncPeriod == nil && newObj.SyncPeriod != nil { + o.SyncPeriod = &newObj.SyncPeriod.Duration + } + + if o.Namespace == "" && newObj.CacheNamespace != "" { + o.Namespace = newObj.CacheNamespace + } + + if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" { + o.MetricsBindAddress = newObj.Metrics.BindAddress + } + + if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" { + o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress + } + + if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" { + o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName + } + + if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" { + o.LivenessEndpointName = newObj.Health.LivenessEndpointName + } + + if o.Port == 0 && newObj.Webhook.Port != nil { + o.Port = *newObj.Webhook.Port + } + + if o.Host == "" && newObj.Webhook.Host != "" { + o.Host = newObj.Webhook.Host + } + + if o.CertDir == "" && newObj.Webhook.CertDir != "" { + o.CertDir = newObj.Webhook.CertDir + } + + if newObj.Controller != nil { + if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil { + o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout + } + + if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 { + o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency + } + } + + return o, nil +} + +// AndFromOrDie will use options.AndFrom() and will panic if there are errors. +func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { + o, err := o.AndFrom(loader) + if err != nil { + panic(fmt.Sprintf("could not parse config file: %v", err)) + } + return o +} + +func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options { + if obj.LeaderElection == nil { + // The source does not have any configuration; noop + return o + } + + if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil { + o.LeaderElection = *obj.LeaderElection.LeaderElect + } + + if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" { + o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock + } + + if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" { + o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace + } + + if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" { + o.LeaderElectionID = obj.LeaderElection.ResourceName + } + + if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) { + o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration + } + + if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) { + o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration + } + + if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) { + o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration + } + + return o +} + +// defaultHealthProbeListener creates the default health probes listener bound to the given address. 
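
Before the listener constructor itself, a user-level sketch of how these probes get wired: a bind address plus named checkers, using the healthz.Ping checker that ships with controller-runtime. The address is an example value; as the constructor below shows, "" or "0" disables the listener.

```go
package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func setupProbes() (ctrl.Manager, error) {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		HealthProbeBindAddress: ":8081", // example; "" or "0" disables the listener
	})
	if err != nil {
		return nil, err
	}
	// Served under the default /healthz and /readyz endpoint names.
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		return nil, err
	}
	if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
		return nil, err
	}
	return mgr, nil
}
```
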
+func defaultHealthProbeListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil +} + +// defaultBaseContext is used as the BaseContext value in Options if one +// has not already been set. +func defaultBaseContext() context.Context { + return context.Background() +} + +// setOptionsDefaults set default values for Options fields. +func setOptionsDefaults(options Options) Options { + // Allow newResourceLock to be mocked + if options.newResourceLock == nil { + options.newResourceLock = leaderelection.NewResourceLock + } + + // Allow newRecorderProvider to be mocked + if options.newRecorderProvider == nil { + options.newRecorderProvider = intrec.NewProvider + } + + // This is duplicated with pkg/cluster, we need it here + // for the leader election and there to provide the user with + // an EventBroadcaster + if options.EventBroadcaster == nil { + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } + } + + if options.newMetricsListener == nil { + options.newMetricsListener = metrics.NewListener + } + leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod + if options.LeaseDuration == nil { + options.LeaseDuration = &leaseDuration + } + + if options.RenewDeadline == nil { + options.RenewDeadline = &renewDeadline + } + + if options.RetryPeriod == nil { + options.RetryPeriod = &retryPeriod + } + + if options.ReadinessEndpointName == "" { + options.ReadinessEndpointName = defaultReadinessEndpoint + } + + if options.LivenessEndpointName == "" { + options.LivenessEndpointName = defaultLivenessEndpoint + } + + if options.newHealthProbeListener == nil { + options.newHealthProbeListener = defaultHealthProbeListener + } + + if options.GracefulShutdownTimeout == nil { + gracefulShutdownTimeout := defaultGracefulShutdownPeriod + options.GracefulShutdownTimeout = &gracefulShutdownTimeout + } + + if options.Logger.GetSink() == nil { + options.Logger = log.Log + } + + if options.BaseContext == nil { + options.BaseContext = defaultBaseContext + } + + return options +} diff --git a/pkg/manager/manager_options_test.go b/pkg/manager/manager_options_test.go new file mode 100644 index 0000000000..048441e56f --- /dev/null +++ b/pkg/manager/manager_options_test.go @@ -0,0 +1,54 @@ +package manager + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/config" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + configv1alpha1 "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" +) + +var _ = Describe("manager.Options", func() { + Describe("AndFrom", func() { + Describe("reading custom type using OfKind", func() { + var ( + o Options + c customConfig + err error + ) + + JustBeforeEach(func() { + s := runtime.NewScheme() + o = Options{Scheme: s} + c = customConfig{} + + _, err = o.AndFrom(config.File().AtPath("./testdata/custom-config.yaml").OfKind(&c)) + }) + + It("should not panic or fail", func() { + Expect(err).To(Succeed()) + }) + It("should set custom properties", func() { + Expect(c.CustomValue).To(Equal("foo")) + }) + }) + }) +}) + +type customConfig struct { + metav1.TypeMeta `json:",inline"` + configv1alpha1.ControllerManagerConfigurationSpec `json:",inline"` + CustomValue string `json:"customValue"` +} + +func (in *customConfig) DeepCopyObject() runtime.Object { + out := &customConfig{} + *out = *in + + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) + + return out +} diff --git a/pkg/manager/manager_suite_test.go b/pkg/manager/manager_suite_test.go new file mode 100644 index 0000000000..3b975a9049 --- /dev/null +++ b/pkg/manager/manager_suite_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "fmt" + "net/http" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Manager Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +// clientTransport is used to force-close keep-alives in tests that check for leaks. +var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? 
+ // hopefully this doesn't break 🤞 + transport, isTransport := rt.(*http.Transport) + if !isTransport { + panic(fmt.Sprintf("wasn't able to grab underlying transport from REST client's RoundTripper, can't figure out how to close keep-alives: expected an *http.Transport, got %#v", rt)) + } + clientTransport = transport + return rt + } + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + // Prevent the metrics listener being created + metrics.DefaultBindAddress = "0" +}, 60) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) + + // Put the DefaultBindAddress back + metrics.DefaultBindAddress = ":8080" +}) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go new file mode 100644 index 0000000000..a4530688fa --- /dev/null +++ b/pkg/manager/manager_test.go @@ -0,0 +1,1835 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "path" + "reflect" + "sync" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/goleak" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/leaderelection" + fakeleaderelection "sigs.k8s.io/controller-runtime/pkg/leaderelection/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var _ = Describe("manger.Manager", func() { + Describe("New", func() { + It("should return an error if there is no Config", func() { + m, err := New(nil, Options{}) + Expect(m).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("must specify Config")) + + }) + + It("should return an error if it can't create a RestMapper", func() { + expected := fmt.Errorf("expected error: RestMapper") + m, err := New(cfg, Options{ + MapperProvider: func(c *rest.Config) (meta.RESTMapper, error) { return nil, expected }, + }) + Expect(m).To(BeNil()) + Expect(err).To(Equal(expected)) + + }) + + It("should return an error it can't create a client.Client", func() { + m, err := New(cfg, Options{ + NewClient: func(cache cache.Cache, config *rest.Config, options client.Options, 
uncachedObjects ...client.Object) (client.Client, error) { + return nil, errors.New("expected error") + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should return an error it can't create a cache.Cache", func() { + m, err := New(cfg, Options{ + NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) { + return nil, fmt.Errorf("expected error") + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should create a client defined in by the new client function", func() { + m, err := New(cfg, Options{ + NewClient: func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { + return nil, nil + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(m.GetClient()).To(BeNil()) + }) + + It("should return an error it can't create a recorder.Provider", func() { + m, err := New(cfg, Options{ + newRecorderProvider: func(_ *rest.Config, _ *runtime.Scheme, _ logr.Logger, _ intrec.EventBroadcasterProducer) (*intrec.Provider, error) { + return nil, fmt.Errorf("expected error") + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should be able to load Options from cfg.ControllerManagerConfiguration type", func() { + duration := metav1.Duration{Duration: 48 * time.Hour} + port := int(6090) + leaderElect := false + + ccfg := &v1alpha1.ControllerManagerConfiguration{ + ControllerManagerConfigurationSpec: v1alpha1.ControllerManagerConfigurationSpec{ + SyncPeriod: &duration, + LeaderElection: &configv1alpha1.LeaderElectionConfiguration{ + LeaderElect: &leaderElect, + ResourceLock: "leases", + ResourceNamespace: "default", + ResourceName: "ctrl-lease", + LeaseDuration: duration, + RenewDeadline: duration, + RetryPeriod: duration, + }, + CacheNamespace: "default", + Metrics: v1alpha1.ControllerMetrics{ + BindAddress: ":6000", + }, + Health: v1alpha1.ControllerHealth{ + HealthProbeBindAddress: "6060", + ReadinessEndpointName: "/readyz", + LivenessEndpointName: "/livez", + }, + Webhook: v1alpha1.ControllerWebhook{ + Port: &port, + Host: "localhost", + CertDir: "/certs", + }, + }, + } + + m, err := Options{}.AndFrom(&fakeDeferredLoader{ccfg}) + Expect(err).To(BeNil()) + + Expect(*m.SyncPeriod).To(Equal(duration.Duration)) + Expect(m.LeaderElection).To(Equal(leaderElect)) + Expect(m.LeaderElectionResourceLock).To(Equal("leases")) + Expect(m.LeaderElectionNamespace).To(Equal("default")) + Expect(m.LeaderElectionID).To(Equal("ctrl-lease")) + Expect(m.LeaseDuration.String()).To(Equal(duration.Duration.String())) + Expect(m.RenewDeadline.String()).To(Equal(duration.Duration.String())) + Expect(m.RetryPeriod.String()).To(Equal(duration.Duration.String())) + Expect(m.Namespace).To(Equal("default")) + Expect(m.MetricsBindAddress).To(Equal(":6000")) + Expect(m.HealthProbeBindAddress).To(Equal("6060")) + Expect(m.ReadinessEndpointName).To(Equal("/readyz")) + Expect(m.LivenessEndpointName).To(Equal("/livez")) + Expect(m.Port).To(Equal(port)) + Expect(m.Host).To(Equal("localhost")) + Expect(m.CertDir).To(Equal("/certs")) + }) + + It("should be able to keep Options when cfg.ControllerManagerConfiguration set", func() { + optDuration := time.Duration(2) + duration := metav1.Duration{Duration: 48 * time.Hour} + port := int(6090) + leaderElect := false + + 
ccfg := &v1alpha1.ControllerManagerConfiguration{ + ControllerManagerConfigurationSpec: v1alpha1.ControllerManagerConfigurationSpec{ + SyncPeriod: &duration, + LeaderElection: &configv1alpha1.LeaderElectionConfiguration{ + LeaderElect: &leaderElect, + ResourceLock: "leases", + ResourceNamespace: "default", + ResourceName: "ctrl-lease", + LeaseDuration: duration, + RenewDeadline: duration, + RetryPeriod: duration, + }, + CacheNamespace: "default", + Metrics: v1alpha1.ControllerMetrics{ + BindAddress: ":6000", + }, + Health: v1alpha1.ControllerHealth{ + HealthProbeBindAddress: "6060", + ReadinessEndpointName: "/readyz", + LivenessEndpointName: "/livez", + }, + Webhook: v1alpha1.ControllerWebhook{ + Port: &port, + Host: "localhost", + CertDir: "/certs", + }, + }, + } + + m, err := Options{ + SyncPeriod: &optDuration, + LeaderElection: true, + LeaderElectionResourceLock: "configmaps", + LeaderElectionNamespace: "ctrl", + LeaderElectionID: "ctrl-configmap", + LeaseDuration: &optDuration, + RenewDeadline: &optDuration, + RetryPeriod: &optDuration, + Namespace: "ctrl", + MetricsBindAddress: ":7000", + HealthProbeBindAddress: "5000", + ReadinessEndpointName: "/readiness", + LivenessEndpointName: "/liveness", + Port: 8080, + Host: "example.com", + CertDir: "/pki", + }.AndFrom(&fakeDeferredLoader{ccfg}) + Expect(err).To(BeNil()) + + Expect(m.SyncPeriod.String()).To(Equal(optDuration.String())) + Expect(m.LeaderElection).To(Equal(true)) + Expect(m.LeaderElectionResourceLock).To(Equal("configmaps")) + Expect(m.LeaderElectionNamespace).To(Equal("ctrl")) + Expect(m.LeaderElectionID).To(Equal("ctrl-configmap")) + Expect(m.LeaseDuration.String()).To(Equal(optDuration.String())) + Expect(m.RenewDeadline.String()).To(Equal(optDuration.String())) + Expect(m.RetryPeriod.String()).To(Equal(optDuration.String())) + Expect(m.Namespace).To(Equal("ctrl")) + Expect(m.MetricsBindAddress).To(Equal(":7000")) + Expect(m.HealthProbeBindAddress).To(Equal("5000")) + Expect(m.ReadinessEndpointName).To(Equal("/readiness")) + Expect(m.LivenessEndpointName).To(Equal("/liveness")) + Expect(m.Port).To(Equal(8080)) + Expect(m.Host).To(Equal("example.com")) + Expect(m.CertDir).To(Equal("/pki")) + }) + + It("should lazily initialize a webhook server if needed", func() { + By("creating a manager with options") + m, err := New(cfg, Options{Port: 9440, Host: "foo.com"}) + Expect(err).NotTo(HaveOccurred()) + Expect(m).NotTo(BeNil()) + + By("checking options are passed to the webhook server") + svr := m.GetWebhookServer() + Expect(svr).NotTo(BeNil()) + Expect(svr.Port).To(Equal(9440)) + Expect(svr.Host).To(Equal("foo.com")) + }) + + It("should not initialize a webhook server if Options.WebhookServer is set", func() { + By("creating a manager with options") + m, err := New(cfg, Options{Port: 9441, WebhookServer: &webhook.Server{Port: 9440}}) + Expect(err).NotTo(HaveOccurred()) + Expect(m).NotTo(BeNil()) + + By("checking the server contains the Port set on the webhook server and not passed to Options") + svr := m.GetWebhookServer() + Expect(svr).NotTo(BeNil()) + Expect(svr.Port).To(Equal(9440)) + }) + + Context("with leader election enabled", func() { + It("should only cancel the leader election after all runnables are done", func() { + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id-2", + HealthProbeBindAddress: "0", + MetricsBindAddress: "0", + }) + Expect(err).To(BeNil()) + + runnableDone := make(chan struct{}) + slowRunnable := 
RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(100 * time.Millisecond) + close(runnableDone) + return nil + }) + Expect(m.Add(slowRunnable)).To(BeNil()) + + cm := m.(*controllerManager) + cm.gracefulShutdownTimeout = time.Second + leaderElectionDone := make(chan struct{}) + cm.onStoppedLeading = func() { + close(leaderElectionDone) + } + + ctx, cancel := context.WithCancel(context.Background()) + mgrDone := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(BeNil()) + close(mgrDone) + }() + <-cm.Elected() + cancel() + select { + case <-leaderElectionDone: + Expect(errors.New("leader election was cancelled before runnables were done")).ToNot(HaveOccurred()) + case <-runnableDone: + // Success + } + // Don't leak routines + <-mgrDone + + }) + It("should disable gracefulShutdown when stopping to lead", func() { + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id-3", + HealthProbeBindAddress: "0", + MetricsBindAddress: "0", + }) + Expect(err).To(BeNil()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mgrDone := make(chan struct{}) + go func() { + defer GinkgoRecover() + err := m.Start(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(ContainSubstring("leader election lost")) + close(mgrDone) + }() + cm := m.(*controllerManager) + <-cm.elected + + cm.leaderElectionCancel() + <-mgrDone + + Expect(cm.gracefulShutdownTimeout.Nanoseconds()).To(Equal(int64(0))) + }) + It("should default ID to controller-runtime if ID is not set", func() { + var rl resourcelock.Interface + m1, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + MetricsBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m1).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m1cm, ok := m1.(*controllerManager) + Expect(ok).To(BeTrue()) + m1cm.onStoppedLeading = func() {} + + m2, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + MetricsBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m2).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m2cm, ok := m2.(*controllerManager) + Expect(ok).To(BeTrue()) + m2cm.onStoppedLeading = func() {} + + c1 := make(chan struct{}) + Expect(m1.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + close(c1) + return nil + }))).To(Succeed()) + + ctx1, cancel1 := context.WithCancel(context.Background()) + defer cancel1() + go func() { + defer GinkgoRecover() + Expect(m1.Elected()).ShouldNot(BeClosed()) + Expect(m1.Start(ctx1)).NotTo(HaveOccurred()) + }() + <-m1.Elected() + <-c1 + + c2 := make(chan struct{}) + 
Expect(m2.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + close(c2) + return nil + }))).To(Succeed()) + + ctx2, cancel := context.WithCancel(context.Background()) + m2done := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m2.Start(ctx2)).NotTo(HaveOccurred()) + close(m2done) + }() + Consistently(m2.Elected()).ShouldNot(Receive()) + + Consistently(c2).ShouldNot(Receive()) + cancel() + <-m2done + }) + + It("should return an error if it can't create a ResourceLock", func() { + m, err := New(cfg, Options{ + newResourceLock: func(_ *rest.Config, _ recorder.Provider, _ leaderelection.Options) (resourcelock.Interface, error) { + return nil, fmt.Errorf("expected error") + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(MatchError(ContainSubstring("expected error"))) + }) + + It("should return an error if namespace not set and not running in cluster", func() { + m, err := New(cfg, Options{LeaderElection: true, LeaderElectionID: "controller-runtime"}) + Expect(m).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("unable to find leader election namespace: not running in-cluster, please specify LeaderElectionNamespace")) + }) + + // We must keep this default until we are sure all controller-runtime users have upgraded from the original default + // ConfigMap lock to a controller-runtime version that has this new default. Many users of controller-runtime skip + // versions, so we should be extremely conservative here. + It("should default to LeasesResourceLock", func() { + m, err := New(cfg, Options{LeaderElection: true, LeaderElectionID: "controller-runtime", LeaderElectionNamespace: "my-ns"}) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + _, isLeaseLock := cm.resourceLock.(*resourcelock.LeaseLock) + Expect(isLeaseLock).To(BeTrue()) + + }) + It("should use the specified ResourceLock", func() { + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionResourceLock: resourcelock.ConfigMapsLeasesResourceLock, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "my-ns", + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + multilock, isMultiLock := cm.resourceLock.(*resourcelock.MultiLock) + Expect(isMultiLock).To(BeTrue()) + primaryLockType := reflect.TypeOf(multilock.Primary) + Expect(primaryLockType.Kind()).To(Equal(reflect.Ptr)) + Expect(primaryLockType.Elem().PkgPath()).To(Equal("k8s.io/client-go/tools/leaderelection/resourcelock")) + Expect(primaryLockType.Elem().Name()).To(Equal("configMapLock")) + _, secondaryIsLeaseLock := multilock.Secondary.(*resourcelock.LeaseLock) + Expect(secondaryIsLeaseLock).To(BeTrue()) + }) + It("should release lease if ElectionReleaseOnCancel is true", func() { + var rl resourcelock.Interface + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "my-ns", + LeaderElectionReleaseOnCancel: true, + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = fakeleaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + }) + Expect(err).To(BeNil()) + + ctx, cancel := context.WithCancel(context.Background()) + doneCh := make(chan struct{}) + 
go func() { + defer GinkgoRecover() + defer close(doneCh) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.(*controllerManager).elected + cancel() + <-doneCh + + ctx, cancel = context.WithCancel(context.Background()) + defer cancel() + record, _, err := rl.Get(ctx) + Expect(err).To(BeNil()) + Expect(record.HolderIdentity).To(BeEmpty()) + }) + }) + + It("should create a listener for the metrics if a valid address is provided", func() { + var listener net.Listener + m, err := New(cfg, Options{ + MetricsBindAddress: ":0", + newMetricsListener: func(addr string) (net.Listener, error) { + var err error + listener, err = metrics.NewListener(addr) + return listener, err + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(listener).ToNot(BeNil()) + Expect(listener.Close()).ToNot(HaveOccurred()) + }) + + It("should return an error if the metrics bind address is already in use", func() { + ln, err := metrics.NewListener(":0") + Expect(err).ShouldNot(HaveOccurred()) + + var listener net.Listener + m, err := New(cfg, Options{ + MetricsBindAddress: ln.Addr().String(), + newMetricsListener: func(addr string) (net.Listener, error) { + var err error + listener, err = metrics.NewListener(addr) + return listener, err + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(listener).To(BeNil()) + + Expect(ln.Close()).ToNot(HaveOccurred()) + }) + + It("should create a listener for the health probes if a valid address is provided", func() { + var listener net.Listener + m, err := New(cfg, Options{ + HealthProbeBindAddress: ":0", + newHealthProbeListener: func(addr string) (net.Listener, error) { + var err error + listener, err = defaultHealthProbeListener(addr) + return listener, err + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(listener).ToNot(BeNil()) + Expect(listener.Close()).ToNot(HaveOccurred()) + }) + + It("should return an error if the health probes bind address is already in use", func() { + ln, err := defaultHealthProbeListener(":0") + Expect(err).ShouldNot(HaveOccurred()) + + var listener net.Listener + m, err := New(cfg, Options{ + HealthProbeBindAddress: ln.Addr().String(), + newHealthProbeListener: func(addr string) (net.Listener, error) { + var err error + listener, err = defaultHealthProbeListener(addr) + return listener, err + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(listener).To(BeNil()) + + Expect(ln.Close()).ToNot(HaveOccurred()) + }) + }) + + Describe("Start", func() { + var startSuite = func(options Options, callbacks ...func(Manager)) { + It("should Start each Component", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + var wgRunnableStarted sync.WaitGroup + wgRunnableStarted.Add(2) + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + wgRunnableStarted.Done() + return nil + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + wgRunnableStarted.Done() + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Elected()).ShouldNot(BeClosed()) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + + <-m.Elected() + wgRunnableStarted.Wait() + }) + + It("should not manipulate the provided config", func() { + // strip WrapTransport, cause func values are PartialEq, not Eq -- + // specifically, for reflect.DeepEqual, 
for all functions F, + // F != nil implies F != F, which means no full equivalence relation. + cfg := rest.CopyConfig(cfg) + cfg.WrapTransport = nil + originalCfg := rest.CopyConfig(cfg) + // The options object is shared by multiple tests, copy it + // into our scope so we manipulate it for this testcase only + options := options + options.newResourceLock = nil + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + Expect(m.GetConfig()).To(Equal(originalCfg)) + }) + + It("should stop when context is cancelled", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }) + + It("should return an error if it can't start the cache", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(mgr.Add( + &cacheProvider{cache: &informertest.FakeInformers{Error: fmt.Errorf("expected error")}}, + )).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + Expect(m.Start(ctx)).To(MatchError(ContainSubstring("expected error"))) + }) + + It("should start the cache before starting anything else", func() { + fakeCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + options.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return fakeCache, nil + } + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + runnableWasStarted := make(chan struct{}) + runnable := RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + if !fakeCache.wasSynced { + return errors.New("runnable got started before cache was synced") + } + close(runnableWasStarted) + return nil + }) + Expect(m.Add(runnable)).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).ToNot(HaveOccurred()) + }() + + <-runnableWasStarted + }) + + It("should start additional clusters before anything else", func() { + fakeCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + options.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return fakeCache, nil + } + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + additionalClusterCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + additionalCluster, err := cluster.New(cfg, func(o *cluster.Options) { + o.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return additionalClusterCache, nil + } + }) + Expect(err).NotTo(HaveOccurred()) + Expect(m.Add(additionalCluster)).NotTo(HaveOccurred()) + + runnableWasStarted := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + if !fakeCache.wasSynced { + return errors.New("WaitForCacheSyncCalled wasn't called before Runnable got started") + } + if !additionalClusterCache.wasSynced { + return errors.New("the additional clusters WaitForCacheSync wasn't called before Runnable got started") + } + close(runnableWasStarted) + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() 
+ Expect(m.Start(ctx)).ToNot(HaveOccurred()) + }() + + <-runnableWasStarted + }) + + It("should return an error if any Components fail to Start", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + <-ctx.Done() + return nil + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + return fmt.Errorf("expected error") + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + return nil + }))).To(Succeed()) + + defer GinkgoRecover() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = m.Start(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(Equal("expected error")) + }) + + It("should start caches added after Manager has started", func() { + fakeCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + options.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return fakeCache, nil + } + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + runnableWasStarted := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + if !fakeCache.wasSynced { + return errors.New("WaitForCacheSyncCalled wasn't called before Runnable got started") + } + close(runnableWasStarted) + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).ToNot(HaveOccurred()) + }() + + <-runnableWasStarted + + additionalClusterCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + fakeCluster := &startClusterAfterManager{informer: additionalClusterCache} + + Expect(err).NotTo(HaveOccurred()) + Expect(m.Add(fakeCluster)).NotTo(HaveOccurred()) + + Eventually(func() bool { + fakeCluster.informer.mu.Lock() + defer fakeCluster.informer.mu.Unlock() + return fakeCluster.informer.wasStarted && fakeCluster.informer.wasSynced + }).Should(BeTrue()) + }) + + It("should wait for runnables to stop", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + var lock sync.Mutex + var runnableDoneCount int64 + runnableDoneFunc := func() { + lock.Lock() + defer lock.Unlock() + atomic.AddInt64(&runnableDoneCount, 1) + } + var wgRunnableRunning sync.WaitGroup + wgRunnableRunning.Add(2) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + wgRunnableRunning.Done() + defer GinkgoRecover() + defer runnableDoneFunc() + <-ctx.Done() + return nil + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + wgRunnableRunning.Done() + defer GinkgoRecover() + defer runnableDoneFunc() + <-ctx.Done() + time.Sleep(300 * time.Millisecond) // slow closure simulation + return nil + }))).To(Succeed()) + + defer GinkgoRecover() + ctx, cancel := context.WithCancel(context.Background()) + + var wgManagerRunning sync.WaitGroup + wgManagerRunning.Add(1) + go func() { + defer GinkgoRecover() + defer wgManagerRunning.Done() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + Eventually(func() int64 { + return atomic.LoadInt64(&runnableDoneCount) + }).Should(BeEquivalentTo(2)) + }() + wgRunnableRunning.Wait() + cancel() + + wgManagerRunning.Wait() + }) + + It("should return an error if any Components fail to 
Start and wait for runnables to stop", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + defer GinkgoRecover() + var lock sync.Mutex + runnableDoneCount := 0 + runnableDoneFunc := func() { + lock.Lock() + defer lock.Unlock() + runnableDoneCount++ + } + + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + defer runnableDoneFunc() + return fmt.Errorf("expected error") + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + defer runnableDoneFunc() + <-ctx.Done() + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + Expect(m.Start(ctx)).To(HaveOccurred()) + Expect(runnableDoneCount).To(Equal(2)) + }) + + It("should refuse to add runnable if stop procedure is already engaged", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + defer GinkgoRecover() + + var wgRunnableRunning sync.WaitGroup + wgRunnableRunning.Add(1) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + wgRunnableRunning.Done() + defer GinkgoRecover() + <-ctx.Done() + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + wgRunnableRunning.Wait() + cancel() + time.Sleep(100 * time.Millisecond) // give some time for the stop chan closure to be caught by the manager + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + return nil + }))).NotTo(Succeed()) + }) + + It("should return both runnables and stop errors when both error", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + m.(*controllerManager).gracefulShutdownTimeout = 1 * time.Nanosecond + Expect(m.Add(RunnableFunc(func(context.Context) error { + return runnableError{} + }))) + testDone := make(chan struct{}) + defer close(testDone) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + select { + case <-testDone: + return nil + case <-timer.C: + return nil + } + }))) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = m.Start(ctx) + Expect(err).ToNot(BeNil()) + eMsg := "[not feeling like that, failed waiting for all runnables to end within grace period of 1ns: context deadline exceeded]" + Expect(err.Error()).To(Equal(eMsg)) + Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) + Expect(errors.Is(err, runnableError{})).To(BeTrue()) + }) + + It("should return only stop errors if runnables dont error", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + m.(*controllerManager).gracefulShutdownTimeout = 1 * time.Nanosecond + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + return nil + }))) + testDone := make(chan struct{}) + defer close(testDone) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + select { + case <-testDone: + return nil + case <-timer.C: + return nil + } + }))).NotTo(HaveOccurred()) + ctx, cancel := context.WithCancel(context.Background()) + managerStopDone := make(chan struct{}) + go func() { err = m.Start(ctx); close(managerStopDone) }() + // Use the 
'elected' channel to find out if startup was done, otherwise we stop + // before we started the Runnable and see flakes, mostly in low-CPU envs like CI + <-m.(*controllerManager).elected + cancel() + <-managerStopDone + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(Equal("failed waiting for all runnables to end within grace period of 1ns: context deadline exceeded")) + Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) + Expect(errors.Is(err, runnableError{})).ToNot(BeTrue()) + }) + + It("should return only runnables error if stop doesn't error", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + Expect(m.Add(RunnableFunc(func(context.Context) error { + return runnableError{} + }))) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = m.Start(ctx) + Expect(err).ToNot(BeNil()) + Expect(err.Error()).To(Equal("not feeling like that")) + Expect(errors.Is(err, context.DeadlineExceeded)).ToNot(BeTrue()) + Expect(errors.Is(err, runnableError{})).To(BeTrue()) + }) + + It("should not wait for runnables if gracefulShutdownTimeout is 0", func() { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + m.(*controllerManager).gracefulShutdownTimeout = time.Duration(0) + + runnableStopped := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(100 * time.Millisecond) + close(runnableStopped) + return nil + }))).ToNot(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + managerStopDone := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + close(managerStopDone) + }() + <-m.Elected() + cancel() + + <-managerStopDone + <-runnableStopped + }) + + } + + Context("with defaults", func() { + startSuite(Options{}) + }) + + Context("with leaderelection enabled", func() { + startSuite( + Options{ + LeaderElection: true, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "default", + newResourceLock: fakeleaderelection.NewResourceLock, + }, + func(m Manager) { + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + cm.onStoppedLeading = func() {} + }, + ) + }) + + Context("should start serving metrics", func() { + var listener net.Listener + var opts Options + + BeforeEach(func() { + listener = nil + opts = Options{ + newMetricsListener: func(addr string) (net.Listener, error) { + var err error + listener, err = metrics.NewListener(addr) + return listener, err + }, + } + }) + + AfterEach(func() { + if listener != nil { + listener.Close() + } + }) + + It("should stop serving metrics when stop is called", func() { + opts.MetricsBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + + // Check the metrics started + endpoint := fmt.Sprintf("http://%s", listener.Addr().String()) + _, err = http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + + // Shutdown the server + cancel() + + // Expect the metrics server to shutdown + Eventually(func() error { + _, err = http.Get(endpoint) + return err + }).ShouldNot(Succeed()) + }) + + It("should serve metrics endpoint", func() { + opts.MetricsBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + metricsEndpoint := fmt.Sprintf("http://%s/metrics", listener.Addr().String()) + resp, err := http.Get(metricsEndpoint) + Expect(err).NotTo(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(200)) + }) + + It("should not serve anything other than metrics endpoint by default", func() { + opts.MetricsBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + endpoint := fmt.Sprintf("http://%s/should-not-exist", listener.Addr().String()) + resp, err := http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(404)) + }) + + It("should serve metrics in its registry", func() { + one := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "test_one", + Help: "test metric for testing", + }) + one.Inc() + err := metrics.Registry.Register(one) + Expect(err).NotTo(HaveOccurred()) + + opts.MetricsBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + metricsEndpoint := fmt.Sprintf("http://%s/metrics", listener.Addr().String()) + resp, err := http.Get(metricsEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(200)) + + data, err := io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(data)).To(ContainSubstring("%s\n%s\n%s\n", + `# HELP test_one test metric for testing`, + `# TYPE test_one counter`, + `test_one 1`, + )) + + // Unregister will return false if the metric was never registered + ok := metrics.Registry.Unregister(one) + Expect(ok).To(BeTrue()) + }) + + It("should serve extra endpoints", func() { + opts.MetricsBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + err = m.AddMetricsExtraHandler("/debug", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + _, _ = w.Write([]byte("Some debug info")) + })) + Expect(err).NotTo(HaveOccurred()) + + // Should error when we add another extra endpoint on the already registered path. 
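The extra-handler test above (together with the duplicate-registration check that follows) exercises AddMetricsExtraHandler. As a usage note, a common real-world application of the same API is exposing pprof on the metrics server; the sketch below is illustrative only, not part of this change, and the paths chosen are assumptions:

package main

import (
	"net/http"
	"net/http/pprof"

	ctrl "sigs.k8s.io/controller-runtime"
)

// registerPprof mounts the standard library pprof handlers on the manager's
// metrics server. AddMetricsExtraHandler rejects a path that is already taken,
// so each path can only be registered once.
func registerPprof(mgr ctrl.Manager) error {
	if err := mgr.AddMetricsExtraHandler("/debug/pprof/", http.HandlerFunc(pprof.Index)); err != nil {
		return err
	}
	return mgr.AddMetricsExtraHandler("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
}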
+ err = m.AddMetricsExtraHandler("/debug", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + _, _ = w.Write([]byte("Another debug info")) + })) + Expect(err).To(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + endpoint := fmt.Sprintf("http://%s/debug", listener.Addr().String()) + resp, err := http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + body, err := io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(Equal("Some debug info")) + }) + }) + }) + + Context("should start serving health probes", func() { + var listener net.Listener + var opts Options + + BeforeEach(func() { + listener = nil + opts = Options{ + newHealthProbeListener: func(addr string) (net.Listener, error) { + var err error + listener, err = defaultHealthProbeListener(addr) + return listener, err + }, + } + }) + + AfterEach(func() { + if listener != nil { + listener.Close() + } + }) + + It("should stop serving health probes when stop is called", func() { + opts.HealthProbeBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + // Check the health probes started + endpoint := fmt.Sprintf("http://%s", listener.Addr().String()) + _, err = http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + + // Shutdown the server + cancel() + + // Expect the health probes server to shutdown + Eventually(func() error { + _, err = http.Get(endpoint) + return err + }, 10*time.Second).ShouldNot(Succeed()) + }) + + It("should serve readiness endpoint", func() { + opts.HealthProbeBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + res := fmt.Errorf("not ready yet") + namedCheck := "check" + err = m.AddReadyzCheck(namedCheck, func(_ *http.Request) error { return res }) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + readinessEndpoint := fmt.Sprint("http://", listener.Addr().String(), defaultReadinessEndpoint) + + // Controller is not ready + resp, err := http.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError)) + + // Controller is ready + res = nil + resp, err = http.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check readiness path without trailing slash without redirect + readinessEndpoint = fmt.Sprint("http://", listener.Addr().String(), defaultReadinessEndpoint) + res = nil + httpClient := http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse // Do not follow redirect + }, + } + resp, err = httpClient.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check readiness path for individual check + readinessEndpoint = fmt.Sprint("http://", listener.Addr().String(), path.Join(defaultReadinessEndpoint, namedCheck)) + res = 
nil + resp, err = http.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + }) + + It("should serve liveness endpoint", func() { + opts.HealthProbeBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + res := fmt.Errorf("not alive") + namedCheck := "check" + err = m.AddHealthzCheck(namedCheck, func(_ *http.Request) error { return res }) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + livenessEndpoint := fmt.Sprint("http://", listener.Addr().String(), defaultLivenessEndpoint) + + // Controller is not ready + resp, err := http.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError)) + + // Controller is ready + res = nil + resp, err = http.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check liveness path without trailing slash without redirect + livenessEndpoint = fmt.Sprint("http://", listener.Addr().String(), defaultLivenessEndpoint) + res = nil + httpClient := http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse // Do not follow redirect + }, + } + resp, err = httpClient.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check readiness path for individual check + livenessEndpoint = fmt.Sprint("http://", listener.Addr().String(), path.Join(defaultLivenessEndpoint, namedCheck)) + res = nil + resp, err = http.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + }) + }) + + Describe("Add", func() { + It("should immediately start the Component if the Manager has already Started another Component", + func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + + // Add one component before starting + c1 := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + close(c1) + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + // Wait for the Manager to start + Eventually(func() bool { + return mgr.runnables.Caches.Started() + }).Should(BeTrue()) + + // Add another component after starting + c2 := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + close(c2) + return nil + }))).To(Succeed()) + <-c1 + <-c2 + }) + + It("should immediately start the Component if the Manager has already Started", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + + // Wait for the Manager to start + Eventually(func() bool { + return mgr.runnables.Caches.Started() + }).Should(BeTrue()) + + c1 := make(chan struct{}) + 
Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + close(c1) + return nil + }))).To(Succeed()) + <-c1 + }) + + It("should fail if SetFields fails", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(m.Add(&failRec{})).To(HaveOccurred()) + }) + }) + Describe("SetFields", func() { + It("should inject field values", func() { + m, err := New(cfg, Options{ + NewCache: func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return &informertest.FakeInformers{}, nil + }, + }) + Expect(err).NotTo(HaveOccurred()) + + By("Injecting the dependencies") + err = m.SetFields(&injectable{ + scheme: func(scheme *runtime.Scheme) error { + defer GinkgoRecover() + Expect(scheme).To(Equal(m.GetScheme())) + return nil + }, + config: func(config *rest.Config) error { + defer GinkgoRecover() + Expect(config).To(Equal(m.GetConfig())) + return nil + }, + client: func(client client.Client) error { + defer GinkgoRecover() + Expect(client).To(Equal(m.GetClient())) + return nil + }, + cache: func(c cache.Cache) error { + defer GinkgoRecover() + Expect(c).To(Equal(m.GetCache())) + return nil + }, + stop: func(stop <-chan struct{}) error { + defer GinkgoRecover() + Expect(stop).NotTo(BeNil()) + return nil + }, + f: func(f inject.Func) error { + defer GinkgoRecover() + Expect(f).NotTo(BeNil()) + return nil + }, + log: func(logger logr.Logger) error { + defer GinkgoRecover() + Expect(logger).To(Equal(log.Log)) + return nil + }, + }) + Expect(err).NotTo(HaveOccurred()) + + By("Returning an error if dependency injection fails") + + expected := fmt.Errorf("expected error") + err = m.SetFields(&injectable{ + client: func(client client.Client) error { + return expected + }, + }) + Expect(err).To(Equal(expected)) + + err = m.SetFields(&injectable{ + scheme: func(scheme *runtime.Scheme) error { + return expected + }, + }) + Expect(err).To(Equal(expected)) + + err = m.SetFields(&injectable{ + config: func(config *rest.Config) error { + return expected + }, + }) + Expect(err).To(Equal(expected)) + + err = m.SetFields(&injectable{ + cache: func(c cache.Cache) error { + return expected + }, + }) + Expect(err).To(Equal(expected)) + + err = m.SetFields(&injectable{ + f: func(c inject.Func) error { + return expected + }, + }) + Expect(err).To(Equal(expected)) + + err = m.SetFields(&injectable{ + stop: func(<-chan struct{}) error { + return expected + }, + }) + Expect(err).To(Equal(expected)) + }) + }) + + It("should not leak goroutines when stopped", func() { + currentGRs := goleak.IgnoreCurrent() + + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + + // force-close keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. 
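The two goroutine-leak tests above build on go.uber.org/goleak: IgnoreCurrent() snapshots the goroutines that already exist, and Find() later fails if any goroutine created after the snapshot is still alive. A minimal stand-alone sketch of that pattern, assuming a plain go test function rather than Ginkgo:

package example

import (
	"testing"

	"go.uber.org/goleak"
)

func TestNoGoroutineLeak(t *testing.T) {
	// Snapshot goroutines that are already running (test framework, runtime helpers, ...).
	ignoreExisting := goleak.IgnoreCurrent()

	// The code under test: a goroutine that terminates cleanly.
	done := make(chan struct{})
	go func() { close(done) }()
	<-done

	// Fails if a goroutine started after the snapshot is still running.
	if err := goleak.Find(ignoreExisting); err != nil {
		t.Fatalf("leaked goroutines: %v", err)
	}
}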
+ clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should not leak goroutines if the default event broadcaster is used & events are emitted", func() { + currentGRs := goleak.IgnoreCurrent() + + m, err := New(cfg, Options{ /* implicit: default setting for EventBroadcaster */ }) + Expect(err).NotTo(HaveOccurred()) + + By("adding a runnable that emits an event") + ns := corev1.Namespace{} + ns.Name = "default" + + recorder := m.GetEventRecorderFor("rock-and-roll") + Expect(m.Add(RunnableFunc(func(_ context.Context) error { + recorder.Event(&ns, "Warning", "BallroomBlitz", "yeah, yeah, yeah-yeah-yeah") + return nil + }))).To(Succeed()) + + By("starting the manager & waiting till we've sent our event") + ctx, cancel := context.WithCancel(context.Background()) + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + Expect(m.Start(ctx)).To(Succeed()) + }() + <-m.Elected() + + Eventually(func() *corev1.Event { + evts, err := clientset.CoreV1().Events("").Search(m.GetScheme(), &ns) + Expect(err).NotTo(HaveOccurred()) + + for i, evt := range evts.Items { + if evt.Reason == "BallroomBlitz" { + return &evts.Items[i] + } + } + return nil + }).ShouldNot(BeNil()) + + By("making sure there's no extra go routines still running after we stop") + cancel() + <-doneCh + + // force-close keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. + clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should provide a function to get the Config", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(m.GetConfig()).To(Equal(mgr.cluster.GetConfig())) + }) + + It("should provide a function to get the Client", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(m.GetClient()).To(Equal(mgr.cluster.GetClient())) + }) + + It("should provide a function to get the Scheme", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(m.GetScheme()).To(Equal(mgr.cluster.GetScheme())) + }) + + It("should provide a function to get the FieldIndexer", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(m.GetFieldIndexer()).To(Equal(mgr.cluster.GetFieldIndexer())) + }) + + It("should provide a function to get the EventRecorder", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(m.GetEventRecorderFor("test")).NotTo(BeNil()) + }) + It("should provide a function to get the APIReader", func() { + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(m.GetAPIReader()).NotTo(BeNil()) + }) +}) + +var _ reconcile.Reconciler = &failRec{} +var _ inject.Client = &failRec{} + +type failRec struct{} + +func (*failRec) Reconcile(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +func (*failRec) Start(context.Context) error { + return nil +} + +func (*failRec) InjectClient(client.Client) error { + return fmt.Errorf("expected error") +} + +var _ inject.Injector = &injectable{} +var _ inject.Cache = &injectable{} +var _ 
inject.Client = &injectable{} +var _ inject.Scheme = &injectable{} +var _ inject.Config = &injectable{} +var _ inject.Stoppable = &injectable{} +var _ inject.Logger = &injectable{} + +type injectable struct { + scheme func(scheme *runtime.Scheme) error + client func(client.Client) error + config func(config *rest.Config) error + cache func(cache.Cache) error + f func(inject.Func) error + stop func(<-chan struct{}) error + log func(logger logr.Logger) error +} + +func (i *injectable) InjectCache(c cache.Cache) error { + if i.cache == nil { + return nil + } + return i.cache(c) +} + +func (i *injectable) InjectConfig(config *rest.Config) error { + if i.config == nil { + return nil + } + return i.config(config) +} + +func (i *injectable) InjectClient(c client.Client) error { + if i.client == nil { + return nil + } + return i.client(c) +} + +func (i *injectable) InjectScheme(scheme *runtime.Scheme) error { + if i.scheme == nil { + return nil + } + return i.scheme(scheme) +} + +func (i *injectable) InjectFunc(f inject.Func) error { + if i.f == nil { + return nil + } + return i.f(f) +} + +func (i *injectable) InjectStopChannel(stop <-chan struct{}) error { + if i.stop == nil { + return nil + } + return i.stop(stop) +} + +func (i *injectable) InjectLogger(log logr.Logger) error { + if i.log == nil { + return nil + } + return i.log(log) +} + +func (i *injectable) Start(<-chan struct{}) error { + return nil +} + +type runnableError struct { +} + +func (runnableError) Error() string { + return "not feeling like that" +} + +type fakeDeferredLoader struct { + *v1alpha1.ControllerManagerConfiguration +} + +func (f *fakeDeferredLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) { + return f.ControllerManagerConfiguration.ControllerManagerConfigurationSpec, nil +} + +func (f *fakeDeferredLoader) InjectScheme(scheme *runtime.Scheme) error { + return nil +} + +var _ Runnable = &cacheProvider{} + +type cacheProvider struct { + cache cache.Cache +} + +func (c *cacheProvider) GetCache() cache.Cache { + return c.cache +} + +func (c *cacheProvider) Start(ctx context.Context) error { + return c.cache.Start(ctx) +} + +type startSignalingInformer struct { + mu sync.Mutex + + // The manager calls Start and WaitForCacheSync in + // parallel, so we have to protect wasStarted with a Mutex + // and block in WaitForCacheSync until it is true. + wasStarted bool + // was synced will be true once Start was called and + // WaitForCacheSync returned, just like a real cache. 
+ wasSynced bool + cache.Cache +} + +func (c *startSignalingInformer) Start(ctx context.Context) error { + c.mu.Lock() + c.wasStarted = true + c.mu.Unlock() + return c.Cache.Start(ctx) +} + +func (c *startSignalingInformer) WaitForCacheSync(ctx context.Context) bool { + defer func() { + c.mu.Lock() + c.wasSynced = true + c.mu.Unlock() + }() + return c.Cache.WaitForCacheSync(ctx) +} + +type startClusterAfterManager struct { + informer *startSignalingInformer +} + +func (c *startClusterAfterManager) Start(ctx context.Context) error { + return c.informer.Start(ctx) +} + +func (c *startClusterAfterManager) GetCache() cache.Cache { + return c.informer +} diff --git a/pkg/manager/runnable_group.go b/pkg/manager/runnable_group.go new file mode 100644 index 0000000000..f7b91a209f --- /dev/null +++ b/pkg/manager/runnable_group.go @@ -0,0 +1,297 @@ +package manager + +import ( + "context" + "errors" + "sync" + + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var ( + errRunnableGroupStopped = errors.New("can't accept new runnable as stop procedure is already engaged") +) + +// readyRunnable encapsulates a runnable with +// a ready check. +type readyRunnable struct { + Runnable + Check runnableCheck + signalReady bool +} + +// runnableCheck can be passed to Add() to let the runnable group determine that a +// runnable is ready. A runnable check should block until a runnable is ready; +// if the returned result is false, the runnable is considered not ready and failed. +type runnableCheck func(ctx context.Context) bool + +// runnables handles all the runnables for a manager by grouping them according to their +// type (webhooks, caches etc.). +type runnables struct { + Webhooks *runnableGroup + Caches *runnableGroup + LeaderElection *runnableGroup + Others *runnableGroup +} + +// newRunnables creates a new runnables object. +func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables { + return &runnables{ + Webhooks: newRunnableGroup(baseContext, errChan), + Caches: newRunnableGroup(baseContext, errChan), + LeaderElection: newRunnableGroup(baseContext, errChan), + Others: newRunnableGroup(baseContext, errChan), + } +} + +// Add adds a runnable to the closest group of runnables that it belongs to. +// +// Add should be able to be called before and after Start, but not after StopAndWait. +// Add should return an error when called during StopAndWait. +// The runnables added before Start are started when Start is called. +// The runnables added after Start are started directly. +func (r *runnables) Add(fn Runnable) error { + switch runnable := fn.(type) { + case hasCache: + return r.Caches.Add(fn, func(ctx context.Context) bool { + return runnable.GetCache().WaitForCacheSync(ctx) + }) + case *webhook.Server: + return r.Webhooks.Add(fn, nil) + case LeaderElectionRunnable: + if !runnable.NeedLeaderElection() { + return r.Others.Add(fn, nil) + } + return r.LeaderElection.Add(fn, nil) + default: + return r.LeaderElection.Add(fn, nil) + } +} + +// runnableGroup manages a group of runnables that are +// meant to be running together until StopAndWait is called. +// +// Runnables can be added to a group after the group has started +// but not after it's stopped or while shutting down. 
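As the Add dispatch above shows, a plain Runnable defaults into the LeaderElection group; it only lands in the Others group if it implements LeaderElectionRunnable and reports that it does not need leader election. The sketch below, placed here just ahead of the runnableGroup type purely for illustration and not part of this change, shows such an opt-out runnable:

package manager

import "context"

// alwaysRun is a sketch of a Runnable that must run on every replica, not only
// on the elected leader; because NeedLeaderElection returns false, runnables.Add
// routes it into the Others group instead of the LeaderElection group.
type alwaysRun struct{}

func (alwaysRun) Start(ctx context.Context) error {
	<-ctx.Done() // keep running until the manager shuts down
	return nil
}

func (alwaysRun) NeedLeaderElection() bool { return false }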
+type runnableGroup struct { + ctx context.Context + cancel context.CancelFunc + + start sync.Mutex + startOnce sync.Once + started bool + startQueue []*readyRunnable + startReadyCh chan *readyRunnable + + stop sync.RWMutex + stopOnce sync.Once + stopped bool + + // errChan is the error channel passed by the caller + // when the group is created. + // All errors are forwarded to this channel once they occur. + errChan chan error + + // ch is the internal channel where the runnables are read off from. + ch chan *readyRunnable + + // wg is an internal sync.WaitGroup that allows us to properly stop + // and wait for all the runnables to finish before returning. + wg *sync.WaitGroup +} + +func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup { + r := &runnableGroup{ + startReadyCh: make(chan *readyRunnable), + errChan: errChan, + ch: make(chan *readyRunnable), + wg: new(sync.WaitGroup), + } + + r.ctx, r.cancel = context.WithCancel(baseContext()) + return r +} + +// Started returns true if the group has started. +func (r *runnableGroup) Started() bool { + r.start.Lock() + defer r.start.Unlock() + return r.started +} + +// Start starts the group and waits for all +// initially registered runnables to start. +// It can only be called once, subsequent calls have no effect. +func (r *runnableGroup) Start(ctx context.Context) error { + var retErr error + + r.startOnce.Do(func() { + defer close(r.startReadyCh) + + // Start the internal reconciler. + go r.reconcile() + + // Start the group and queue up all + // the runnables that were added prior. + r.start.Lock() + r.started = true + for _, rn := range r.startQueue { + rn.signalReady = true + r.ch <- rn + } + r.start.Unlock() + + // If we don't have any queue, return. + if len(r.startQueue) == 0 { + return + } + + // Wait for all runnables to signal. + for { + select { + case <-ctx.Done(): + if err := ctx.Err(); !errors.Is(err, context.Canceled) { + retErr = err + } + case rn := <-r.startReadyCh: + for i, existing := range r.startQueue { + if existing == rn { + // Remove the item from the start queue. + r.startQueue = append(r.startQueue[:i], r.startQueue[i+1:]...) + break + } + } + // We're done waiting if the queue is empty, return. + if len(r.startQueue) == 0 { + return + } + } + } + }) + + return retErr +} + +// reconcile is our main entrypoint for every runnable added +// to this group. Its primary job is to read off the internal channel +// and schedule runnables while tracking their state. +func (r *runnableGroup) reconcile() { + for runnable := range r.ch { + // Handle stop. + // If the shutdown has been called we want to avoid + // adding new goroutines to the WaitGroup because Wait() + // panics if Add() is called after it. + { + r.stop.RLock() + if r.stopped { + // Drop any runnables if we're stopped. + r.errChan <- errRunnableGroupStopped + r.stop.RUnlock() + continue + } + + // Why is this here? + // When StopAndWait is called, if a runnable is in the process + // of being added, we could end up in a situation where + // the WaitGroup is incremented while StopAndWait has called Wait(), + // which would result in a panic. + r.wg.Add(1) + r.stop.RUnlock() + } + + // Start the runnable. + go func(rn *readyRunnable) { + go func() { + if rn.Check(r.ctx) { + if rn.signalReady { + r.startReadyCh <- rn + } + } + }() + + // If we return, the runnable ended cleanly + // or returned an error to the channel. + // + // We should always decrement the WaitGroup here. + defer r.wg.Done() + + // Start the runnable. 
+ if err := rn.Start(r.ctx); err != nil { + r.errChan <- err + } + }(runnable) + } +} + +// Add should be able to be called before and after Start, but not after StopAndWait. +// Add should return an error when called during StopAndWait. +func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { + r.stop.RLock() + if r.stopped { + r.stop.RUnlock() + return errRunnableGroupStopped + } + r.stop.RUnlock() + + if ready == nil { + ready = func(_ context.Context) bool { return true } + } + + readyRunnable := &readyRunnable{ + Runnable: rn, + Check: ready, + } + + // Handle start. + // If the overall runnable group isn't started yet + // we want to buffer the runnables and let Start() + // queue them up again later. + { + r.start.Lock() + + // Check if we're already started. + if !r.started { + // Store the runnable in the internal startQueue if not. + r.startQueue = append(r.startQueue, readyRunnable) + r.start.Unlock() + return nil + } + r.start.Unlock() + } + + // Enqueue the runnable. + r.ch <- readyRunnable + return nil +} + +// StopAndWait waits for all the runnables to finish before returning. +func (r *runnableGroup) StopAndWait(ctx context.Context) { + r.stopOnce.Do(func() { + // Close the reconciler channel once we're done. + defer close(r.ch) + + _ = r.Start(ctx) + r.stop.Lock() + // Store the stopped variable so we don't accept any new + // runnables for the time being. + r.stopped = true + r.stop.Unlock() + + // Cancel the internal context. + r.cancel() + + done := make(chan struct{}) + go func() { + defer close(done) + // Wait for all the runnables to finish. + r.wg.Wait() + }() + + select { + case <-done: + // We're done, exit. + case <-ctx.Done(): + // Calling context has expired, exit. + } + }) +} diff --git a/pkg/manager/runnable_group_test.go b/pkg/manager/runnable_group_test.go new file mode 100644 index 0000000000..db23eeae95 --- /dev/null +++ b/pkg/manager/runnable_group_test.go @@ -0,0 +1,182 @@ +package manager + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var _ = Describe("runnables", func() { + errCh := make(chan error) + + It("should be able to create a new runnables object", func() { + Expect(newRunnables(defaultBaseContext, errCh)).ToNot(BeNil()) + }) + + It("should add caches to the appropriate group", func() { + cache := &cacheProvider{cache: &informertest.FakeInformers{Error: fmt.Errorf("expected error")}} + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(cache)).To(Succeed()) + Expect(r.Caches.startQueue).To(HaveLen(1)) + }) + + It("should add webhooks to the appropriate group", func() { + webhook := &webhook.Server{} + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(webhook)).To(Succeed()) + Expect(r.Webhooks.startQueue).To(HaveLen(1)) + }) + + It("should add any runnable to the leader election group", func() { + err := errors.New("runnable func") + runnable := RunnableFunc(func(c context.Context) error { + return err + }) + + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(runnable)).To(Succeed()) + Expect(r.LeaderElection.startQueue).To(HaveLen(1)) + }) +}) + +var _ = Describe("runnableGroup", func() { + errCh := make(chan error) + + It("should be able to add new runnables before it starts", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + + Expect(rg.Started()).To(BeFalse()) + }) + + It("should be able to add new runnables before and after start", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + Expect(rg.Start(ctx)).To(Succeed()) + Expect(rg.Started()).To(BeTrue()) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + }) + + It("should be able to add new runnables before and after start concurrently", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(50 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + + for i := 0; i < 20; i++ { + go func(i int) { + defer GinkgoRecover() + + <-time.After(time.Duration(i) * 10 * time.Millisecond) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + }(i) + } + }) + + It("should be able to close the group and wait for all runnables to finish", func() { + ctx, cancel := context.WithCancel(context.Background()) + + exited := pointer.Int64(0) + rg := newRunnableGroup(defaultBaseContext, errCh) + for i := 0; i < 10; i++ { + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + defer atomic.AddInt64(exited, 1) + <-ctx.Done() + <-time.After(time.Duration(i) * 10 * time.Millisecond) + return nil + }), nil)).To(Succeed()) + } + Expect(rg.Start(ctx)).To(Succeed()) + + // Cancel the context, asking the runnables to exit. 
+ cancel() + rg.StopAndWait(context.Background()) + + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + return nil + }), nil)).ToNot(Succeed()) + + Expect(atomic.LoadInt64(exited)).To(BeNumerically("==", 10)) + }) + + It("should be able to wait for all runnables to be ready at different intervals", func() { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(50 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + + for i := 0; i < 20; i++ { + go func(i int) { + defer GinkgoRecover() + + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), func(_ context.Context) bool { + <-time.After(time.Duration(i) * 10 * time.Millisecond) + return true + })).To(Succeed()) + }(i) + } + }) + + It("should not turn ready if some readiness check fail", func() { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(50 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + + for i := 0; i < 20; i++ { + go func(i int) { + defer GinkgoRecover() + + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), func(_ context.Context) bool { + <-time.After(time.Duration(i) * 10 * time.Millisecond) + return i%2 == 0 // Return false readiness all uneven indexes. + })).To(Succeed()) + }(i) + } + }) +}) diff --git a/pkg/manager/signals/doc.go b/pkg/manager/signals/doc.go new file mode 100644 index 0000000000..737cc7eff2 --- /dev/null +++ b/pkg/manager/signals/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package signals contains libraries for handling signals to gracefully +// shutdown the manager in combination with Kubernetes pod graceful termination +// policy. +package signals diff --git a/pkg/manager/signals/signal.go b/pkg/manager/signals/signal.go new file mode 100644 index 0000000000..a79cfb42df --- /dev/null +++ b/pkg/manager/signals/signal.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "context" + "os" + "os/signal" +) + +var onlyOneSignalHandler = make(chan struct{}) + +// SetupSignalHandler registers for SIGTERM and SIGINT. 
A context is returned +// which is canceled on one of these signals. If a second signal is caught, the program +// is terminated with exit code 1. +func SetupSignalHandler() context.Context { + close(onlyOneSignalHandler) // panics when called twice + + ctx, cancel := context.WithCancel(context.Background()) + + c := make(chan os.Signal, 2) + signal.Notify(c, shutdownSignals...) + go func() { + <-c + cancel() + <-c + os.Exit(1) // second signal. Exit directly. + }() + + return ctx +} diff --git a/pkg/manager/signals/signal_posix.go b/pkg/manager/signals/signal_posix.go new file mode 100644 index 0000000000..a0f00a7321 --- /dev/null +++ b/pkg/manager/signals/signal_posix.go @@ -0,0 +1,27 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/pkg/manager/signals/signal_test.go b/pkg/manager/signals/signal_test.go new file mode 100644 index 0000000000..2776e13a6d --- /dev/null +++ b/pkg/manager/signals/signal_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "fmt" + "os" + "os/signal" + "sync" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("runtime signal", func() { + + Context("SignalHandler Test", func() { + + It("test signal handler", func() { + ctx := SetupSignalHandler() + task := &Task{ + ticker: time.NewTicker(time.Second * 2), + } + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + task.wg.Add(1) + go func(c chan os.Signal) { + defer task.wg.Done() + task.Run(c) + }(c) + + select { + case sig := <-c: + fmt.Printf("Got %s signal. 
Aborting...\n", sig) + case _, ok := <-ctx.Done(): + Expect(ok).To(BeFalse()) + } + }) + + }) + +}) + +type Task struct { + wg sync.WaitGroup + ticker *time.Ticker +} + +func (t *Task) Run(c chan os.Signal) { + for { + go sendSignal(c) + handle() + } +} + +func handle() { + for i := 0; i < 5; i++ { + fmt.Print("#") + time.Sleep(time.Millisecond * 100) + } + fmt.Println() +} + +func sendSignal(stopChan chan os.Signal) { + fmt.Printf("...") + time.Sleep(1 * time.Second) + stopChan <- os.Interrupt +} diff --git a/pkg/manager/signals/signal_windows.go b/pkg/manager/signals/signal_windows.go new file mode 100644 index 0000000000..4907d573fe --- /dev/null +++ b/pkg/manager/signals/signal_windows.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" +) + +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/pkg/manager/signals/signals_suite_test.go b/pkg/manager/signals/signals_suite_test.go new file mode 100644 index 0000000000..770df0ca9c --- /dev/null +++ b/pkg/manager/signals/signals_suite_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os/signal" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Runtime Signal Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + signal.Reset() +}) diff --git a/pkg/manager/testdata/custom-config.yaml b/pkg/manager/testdata/custom-config.yaml new file mode 100644 index 0000000000..a15c9f8e5c --- /dev/null +++ b/pkg/manager/testdata/custom-config.yaml @@ -0,0 +1,3 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: CustomControllerManagerConfiguration +customValue: foo diff --git a/pkg/metrics/client_go_adapter.go b/pkg/metrics/client_go_adapter.go new file mode 100644 index 0000000000..dc805a9d04 --- /dev/null +++ b/pkg/metrics/client_go_adapter.go @@ -0,0 +1,111 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "net/url" + "time" + + "github.com/prometheus/client_golang/prometheus" + clientmetrics "k8s.io/client-go/tools/metrics" +) + +// this file contains setup logic to initialize the myriad of places +// that client-go registers metrics. We copy the names and formats +// from Kubernetes so that we match the core controllers. + +// Metrics subsystem and all of the keys used by the rest client. +const ( + RestClientSubsystem = "rest_client" + LatencyKey = "request_latency_seconds" + ResultKey = "requests_total" +) + +var ( + // client metrics. + + // RequestLatency reports the request latency in seconds per verb/URL. + // Deprecated: This metric is deprecated for removal in a future release: using the URL as a + // dimension results in cardinality explosion for some consumers. It was deprecated upstream + // in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836. + // It is not registered by default. To register: + // import ( + // clientmetrics "k8s.io/client-go/tools/metrics" + // clmetrics "sigs.k8s.io/controller-runtime/metrics" + // ) + // + // func init() { + // clmetrics.Registry.MustRegister(clmetrics.RequestLatency) + // clientmetrics.Register(clientmetrics.RegisterOpts{ + // RequestLatency: clmetrics.LatencyAdapter + // }) + // } + RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: RestClientSubsystem, + Name: LatencyKey, + Help: "Request latency in seconds. Broken down by verb and URL.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + }, []string{"verb", "url"}) + + requestResult = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: RestClientSubsystem, + Name: ResultKey, + Help: "Number of HTTP requests, partitioned by status code, method, and host.", + }, []string{"code", "method", "host"}) +) + +func init() { + registerClientMetrics() +} + +// registerClientMetrics sets up the client latency metrics from client-go. +func registerClientMetrics() { + // register the metrics with our registry + Registry.MustRegister(requestResult) + + // register the metrics with client-go + clientmetrics.Register(clientmetrics.RegisterOpts{ + RequestResult: &resultAdapter{metric: requestResult}, + }) +} + +// this section contains adapters, implementations, and other sundry organic, artisanally +// hand-crafted syntax trees required to convince client-go that it actually wants to let +// someone use its metrics. + +// Client metrics adapters (method #1 for client-go metrics), +// copied (more-or-less directly) from k8s.io/kubernetes setup code +// (which isn't anywhere in an easily-importable place). + +// LatencyAdapter implements LatencyMetric. +type LatencyAdapter struct { + metric *prometheus.HistogramVec +} + +// Observe increments the request latency metric for the given verb/URL. 
+func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { + l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) +} + +type resultAdapter struct { + metric *prometheus.CounterVec +} + +func (r *resultAdapter) Increment(_ context.Context, code, method, host string) { + r.metric.WithLabelValues(code, method, host).Inc() +} diff --git a/pkg/metrics/doc.go b/pkg/metrics/doc.go new file mode 100644 index 0000000000..6ed9df9514 --- /dev/null +++ b/pkg/metrics/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package metrics contains controller related metrics utilities +*/ +package metrics diff --git a/pkg/metrics/listener.go b/pkg/metrics/listener.go new file mode 100644 index 0000000000..123d8c15f9 --- /dev/null +++ b/pkg/metrics/listener.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") + +// DefaultBindAddress sets the default bind address for the metrics listener +// The metrics is on by default. +var DefaultBindAddress = ":8080" + +// NewListener creates a new TCP listener bound to the given address. +func NewListener(addr string) (net.Listener, error) { + if addr == "" { + // If the metrics bind address is empty, default to ":8080" + addr = DefaultBindAddress + } + + // Add a case to disable metrics altogether + if addr == "0" { + return nil, nil + } + + log.Info("Metrics server is starting to listen", "addr", addr) + ln, err := net.Listen("tcp", addr) + if err != nil { + er := fmt.Errorf("error listening on %s: %w", addr, err) + log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts") + return nil, er + } + return ln, nil +} diff --git a/pkg/metrics/registry.go b/pkg/metrics/registry.go new file mode 100644 index 0000000000..ce17124d53 --- /dev/null +++ b/pkg/metrics/registry.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// RegistererGatherer combines both parts of the API of a Prometheus +// registry, both the Registerer and the Gatherer interfaces. +type RegistererGatherer interface { + prometheus.Registerer + prometheus.Gatherer +} + +// Registry is a prometheus registry for storing metrics within the +// controller-runtime. +var Registry RegistererGatherer = prometheus.NewRegistry() diff --git a/pkg/metrics/workqueue.go b/pkg/metrics/workqueue.go new file mode 100644 index 0000000000..8ca47235da --- /dev/null +++ b/pkg/metrics/workqueue.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" +) + +// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus +// which registers metrics to the default prometheus Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +// Metrics subsystem and all keys used by the workqueue. +const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +var ( + depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + }, []string{"name"}) + + adds = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name"}) + + latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. 
Large " + + "values indicate stuck threads. One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + retries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name"}) +) + +func init() { + Registry.MustRegister(depth) + Registry.MustRegister(adds) + Registry.MustRegister(latency) + Registry.MustRegister(workDuration) + Registry.MustRegister(unfinished) + Registry.MustRegister(longestRunningProcessor) + Registry.MustRegister(retries) + + workqueue.SetProvider(workqueueMetricsProvider{}) +} + +type workqueueMetricsProvider struct{} + +func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return retries.WithLabelValues(name) +} diff --git a/pkg/patterns/application/doc.go b/pkg/patterns/application/doc.go new file mode 100644 index 0000000000..72ba10e5fe --- /dev/null +++ b/pkg/patterns/application/doc.go @@ -0,0 +1,27 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package application documents patterns for building Controllers to manage specific applications. +// +// An application is a Controller and Resource that together implement the operational logic for an application. +// They are often used to take off-the-shelf OSS applications, and make them Kubernetes native. +// +// A typical application Controller may use builder.ControllerManagedBy() to create a Controller +// for a single API type that manages other objects it creates. +// +// Application Controllers are most useful for stateful applications such as Cassandra, Etcd and MySQL +// which contain operation logic for sharding, backup and restore, upgrade / downgrade, etc. 
+package application diff --git a/pkg/patterns/operator/doc.go b/pkg/patterns/operator/doc.go new file mode 100644 index 0000000000..5ccd0791af --- /dev/null +++ b/pkg/patterns/operator/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package operator serves to redirect users to the application package. + +Operators are the common name for Kubernetes APIs which manage specific applications. e.g. Spark Operator, +Etcd Operator. +*/ +package operator diff --git a/pkg/predicate/doc.go b/pkg/predicate/doc.go new file mode 100644 index 0000000000..e498107ef7 --- /dev/null +++ b/pkg/predicate/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package predicate defines Predicates used by Controllers to filter Events before they are provided to EventHandlers. +*/ +package predicate diff --git a/pkg/predicate/example_test.go b/pkg/predicate/example_test.go new file mode 100644 index 0000000000..57a1ce7779 --- /dev/null +++ b/pkg/predicate/example_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate_test + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var p predicate.Predicate + +// This example creates a new Predicate to drop Update Events where the Generation has not changed. +func ExampleFuncs() { + p = predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + } +} diff --git a/pkg/predicate/predicate.go b/pkg/predicate/predicate.go new file mode 100644 index 0000000000..e79c03072a --- /dev/null +++ b/pkg/predicate/predicate.go @@ -0,0 +1,353 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") + +// Predicate filters events before enqueuing the keys. +type Predicate interface { + // Create returns true if the Create event should be processed + Create(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + Delete(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + Update(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + Generic(event.GenericEvent) bool +} + +var _ Predicate = Funcs{} +var _ Predicate = ResourceVersionChangedPredicate{} +var _ Predicate = GenerationChangedPredicate{} +var _ Predicate = AnnotationChangedPredicate{} +var _ Predicate = or{} +var _ Predicate = and{} + +// Funcs is a struct that implements Predicate using the provided functions; a nil function lets the corresponding events through. +type Funcs struct { + // Create returns true if the Create event should be processed + CreateFunc func(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + DeleteFunc func(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + UpdateFunc func(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + GenericFunc func(event.GenericEvent) bool +} + +// Create implements Predicate. +func (p Funcs) Create(e event.CreateEvent) bool { + if p.CreateFunc != nil { + return p.CreateFunc(e) + } + return true +} + +// Delete implements Predicate. +func (p Funcs) Delete(e event.DeleteEvent) bool { + if p.DeleteFunc != nil { + return p.DeleteFunc(e) + } + return true +} + +// Update implements Predicate. +func (p Funcs) Update(e event.UpdateEvent) bool { + if p.UpdateFunc != nil { + return p.UpdateFunc(e) + } + return true +} + +// Generic implements Predicate. +func (p Funcs) Generic(e event.GenericEvent) bool { + if p.GenericFunc != nil { + return p.GenericFunc(e) + } + return true +} + +// NewPredicateFuncs returns a Funcs predicate that applies the given filter function +// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied +// to the new object. +func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { + return Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return filter(e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return filter(e.Object) + }, + } +} + +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change.
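+// +// A usage sketch (assuming an existing manager "mgr" and reconciler "r", which are not +// defined here): the predicate can be attached through the builder package, e.g. +// +//	builder.ControllerManagedBy(mgr). +//		For(&corev1.Pod{}, builder.WithPredicates(predicate.ResourceVersionChangedPredicate{})). +//		Complete(r)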
+type ResourceVersionChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating resource version change. +func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object to update", "event", e) + return false + } + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() +} + +// GenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is only incremented when the status subresource is enabled. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g. for Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources, be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else, the controller will not self-correct to restore the correct status. +type GenerationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating generation change. +func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +// AnnotationChangedPredicate implements a default update predicate function on annotation change. +// +// This predicate will skip update events that have no change in the object's annotations. +// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// &source.Kind{Type: v1.MyCustomKind}, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{})) +// +// This is mostly useful for controllers that need to trigger either when the resource's generation is incremented +// (i.e., when the resource's .spec changes) or when an annotation changes (e.g., for a staging/alpha API). +type AnnotationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating annotation change.
+func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations()) +} + +// LabelChangedPredicate implements a default update predicate function on label change. +// +// This predicate will skip update events that have no change in the object's labels. +// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// +//	&source.Kind{Type: v1.MyCustomKind}, +//	&handler.EnqueueRequestForObject{}, +//	predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})) +// +// This is helpful when an object's labels carry extra specification information beyond its spec, +// so the controller is triggered by any meaningful change, whether it happens in the spec or in the labels. +type LabelChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for checking label change. +func (LabelChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) +} + +// And returns a composite predicate that implements a logical AND of the predicates passed to it. +func And(predicates ...Predicate) Predicate { + return and{predicates} +} + +type and struct { + predicates []Predicate +} + +func (a and) InjectFunc(f inject.Func) error { + for _, p := range a.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (a and) Create(e event.CreateEvent) bool { + for _, p := range a.predicates { + if !p.Create(e) { + return false + } + } + return true +} + +func (a and) Update(e event.UpdateEvent) bool { + for _, p := range a.predicates { + if !p.Update(e) { + return false + } + } + return true +} + +func (a and) Delete(e event.DeleteEvent) bool { + for _, p := range a.predicates { + if !p.Delete(e) { + return false + } + } + return true +} + +func (a and) Generic(e event.GenericEvent) bool { + for _, p := range a.predicates { + if !p.Generic(e) { + return false + } + } + return true +} + +// Or returns a composite predicate that implements a logical OR of the predicates passed to it.
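+// +// For example, the following (a sketch) passes an update event through if either the +// generation or the labels of the object changed: +// +//	pred := predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})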
+func Or(predicates ...Predicate) Predicate { + return or{predicates} +} + +type or struct { + predicates []Predicate +} + +func (o or) InjectFunc(f inject.Func) error { + for _, p := range o.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (o or) Create(e event.CreateEvent) bool { + for _, p := range o.predicates { + if p.Create(e) { + return true + } + } + return false +} + +func (o or) Update(e event.UpdateEvent) bool { + for _, p := range o.predicates { + if p.Update(e) { + return true + } + } + return false +} + +func (o or) Delete(e event.DeleteEvent) bool { + for _, p := range o.predicates { + if p.Delete(e) { + return true + } + } + return false +} + +func (o or) Generic(e event.GenericEvent) bool { + for _, p := range o.predicates { + if p.Generic(e) { + return true + } + } + return false +} + +// LabelSelectorPredicate constructs a Predicate from a LabelSelector. +// Only objects matching the LabelSelector will be admitted. +func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) { + selector, err := metav1.LabelSelectorAsSelector(&s) + if err != nil { + return Funcs{}, err + } + return NewPredicateFuncs(func(o client.Object) bool { + return selector.Matches(labels.Set(o.GetLabels())) + }), nil +} diff --git a/pkg/predicate/predicate_suite_test.go b/pkg/predicate/predicate_suite_test.go new file mode 100644 index 0000000000..a03d94b17d --- /dev/null +++ b/pkg/predicate/predicate_suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestPredicate(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Predicate Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/predicate/predicate_test.go b/pkg/predicate/predicate_test.go new file mode 100644 index 0000000000..5bdaf42e5c --- /dev/null +++ b/pkg/predicate/predicate_test.go @@ -0,0 +1,974 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var _ = Describe("Predicate", func() { + var pod *corev1.Pod + BeforeEach(func() { + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "biz", Name: "baz"}, + } + }) + + Describe("Funcs", func() { + failingFuncs := predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + defer GinkgoRecover() + Fail("Did not expect CreateFunc to be called.") + return false + }, + DeleteFunc: func(event.DeleteEvent) bool { + defer GinkgoRecover() + Fail("Did not expect DeleteFunc to be called.") + return false + }, + UpdateFunc: func(event.UpdateEvent) bool { + defer GinkgoRecover() + Fail("Did not expect UpdateFunc to be called.") + return false + }, + GenericFunc: func(event.GenericEvent) bool { + defer GinkgoRecover() + Fail("Did not expect GenericFunc to be called.") + return false + }, + } + + It("should call Create", func() { + instance := failingFuncs + instance.CreateFunc = func(evt event.CreateEvent) bool { + defer GinkgoRecover() + Expect(evt.Object).To(Equal(pod)) + return false + } + evt := event.CreateEvent{ + Object: pod, + } + Expect(instance.Create(evt)).To(BeFalse()) + + instance.CreateFunc = func(evt event.CreateEvent) bool { + defer GinkgoRecover() + Expect(evt.Object).To(Equal(pod)) + return true + } + Expect(instance.Create(evt)).To(BeTrue()) + + instance.CreateFunc = nil + Expect(instance.Create(evt)).To(BeTrue()) + }) + + It("should call Update", func() { + newPod := pod.DeepCopy() + newPod.Name = "baz2" + newPod.Namespace = "biz2" + + instance := failingFuncs + instance.UpdateFunc = func(evt event.UpdateEvent) bool { + defer GinkgoRecover() + Expect(evt.ObjectOld).To(Equal(pod)) + Expect(evt.ObjectNew).To(Equal(newPod)) + return false + } + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + Expect(instance.Update(evt)).To(BeFalse()) + + instance.UpdateFunc = func(evt event.UpdateEvent) bool { + defer GinkgoRecover() + Expect(evt.ObjectOld).To(Equal(pod)) + Expect(evt.ObjectNew).To(Equal(newPod)) + return true + } + Expect(instance.Update(evt)).To(BeTrue()) + + instance.UpdateFunc = nil + Expect(instance.Update(evt)).To(BeTrue()) + }) + + It("should call Delete", func() { + instance := failingFuncs + instance.DeleteFunc = func(evt event.DeleteEvent) bool { + defer GinkgoRecover() + Expect(evt.Object).To(Equal(pod)) + return false + } + evt := event.DeleteEvent{ + Object: pod, + } + Expect(instance.Delete(evt)).To(BeFalse()) + + instance.DeleteFunc = func(evt event.DeleteEvent) bool { + defer GinkgoRecover() + Expect(evt.Object).To(Equal(pod)) + return true + } + Expect(instance.Delete(evt)).To(BeTrue()) + + instance.DeleteFunc = nil + Expect(instance.Delete(evt)).To(BeTrue()) + }) + + It("should call Generic", func() { + instance := failingFuncs + instance.GenericFunc = func(evt event.GenericEvent) bool { + defer GinkgoRecover() + Expect(evt.Object).To(Equal(pod)) + return false + } + evt := event.GenericEvent{ + Object: pod, + } + Expect(instance.Generic(evt)).To(BeFalse()) + + instance.GenericFunc = func(evt event.GenericEvent) bool { + defer GinkgoRecover() + Expect(evt.Object).To(Equal(pod)) + return true + } + Expect(instance.Generic(evt)).To(BeTrue()) + + instance.GenericFunc = nil + Expect(instance.Generic(evt)).To(BeTrue()) + }) + }) + + 
Describe("When checking a ResourceVersionChangedPredicate", func() { + instance := predicate.ResourceVersionChangedPredicate{} + + Context("Where the old object doesn't have a ResourceVersion or metadata", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "1", + }} + + failEvnt := event.UpdateEvent{ + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).Should(BeTrue()) + Expect(instance.Update(failEvnt)).Should(BeFalse()) + }) + }) + + Context("Where the new object doesn't have a ResourceVersion or metadata", func() { + It("should return false", func() { + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "1", + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + } + Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).Should(BeTrue()) + Expect(instance.Update(failEvnt)).Should(BeFalse()) + Expect(instance.Update(failEvnt)).Should(BeFalse()) + }) + }) + + Context("Where the ResourceVersion hasn't changed", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "v1", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "v1", + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).Should(BeTrue()) + Expect(instance.Update(failEvnt)).Should(BeFalse()) + Expect(instance.Update(failEvnt)).Should(BeFalse()) + }) + }) + + Context("Where the ResourceVersion has changed", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "v1", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "v2", + }} + passEvt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).Should(BeTrue()) + Expect(instance.Update(passEvt)).Should(BeTrue()) + }) + }) + + Context("Where the objects or metadata are missing", func() { + + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "v1", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + ResourceVersion: "v1", + }} + + failEvt1 := event.UpdateEvent{ObjectOld: old} + failEvt2 := event.UpdateEvent{ObjectNew: new} + failEvt3 := event.UpdateEvent{ObjectOld: old, ObjectNew: new} + Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).Should(BeTrue()) + Expect(instance.Update(failEvt1)).Should(BeFalse()) + Expect(instance.Update(failEvt2)).Should(BeFalse()) + 
Expect(instance.Update(failEvt3)).Should(BeFalse()) + }) + }) + + }) + + Describe("When checking a GenerationChangedPredicate", func() { + instance := predicate.GenerationChangedPredicate{} + Context("Where the old object doesn't have a Generation or metadata", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvnt := event.UpdateEvent{ + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the new object doesn't have a Generation or metadata", func() { + It("should return false", func() { + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the Generation hasn't changed", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the Generation has changed", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 2, + }} + passEvt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + + Context("Where the objects or metadata are missing", func() { + + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvt1 := event.UpdateEvent{ObjectOld: old} + failEvt2 := event.UpdateEvent{ObjectNew: new} + failEvt3 := event.UpdateEvent{ObjectOld: old, ObjectNew: new} + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvt1)).To(BeFalse()) + Expect(instance.Update(failEvt2)).To(BeFalse()) + Expect(instance.Update(failEvt3)).To(BeFalse()) + }) + }) + + }) + + // AnnotationChangedPredicate has almost identical test cases as LabelChangedPredicates, + // so the duplication linter should be muted on both two test suites. 
+ //nolint:dupl + Describe("When checking an AnnotationChangedPredicate", func() { + instance := predicate.AnnotationChangedPredicate{} + Context("Where the old object is missing", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + failEvnt := event.UpdateEvent{ + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the new object is missing", func() { + It("should return false", func() { + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the annotations are empty", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the annotations haven't changed", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where an annotation value has changed", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "weez", + }, + }} + + passEvt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + + Context("Where an annotation has been added", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + "zooz": "qooz", + }, + }} + + passEvt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + + Context("Where an annotation has been removed", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + "zooz": "qooz", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + passEvt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + }) + + // LabelChangedPredicates has almost identical test cases as AnnotationChangedPredicates, + // so the duplication linter should be muted on both two test suites. + //nolint:dupl + Describe("When checking a LabelChangedPredicate", func() { + instance := predicate.LabelChangedPredicate{} + Context("Where the old object is missing", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where the new object is missing", func() { + It("should return false", func() { + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: old, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where the labels are empty", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + evt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where the labels haven't changed", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: old, + 
ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where a label value has changed", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bee", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeTrue()) + }) + }) + + Context("Where a label has been added", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + "faa": "bor", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeTrue()) + }) + }) + + Context("Where a label has been removed", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + "faa": "bor", + }, + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: old, + ObjectNew: new, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeTrue()) + }) + }) + }) + + Context("With a boolean predicate", func() { + funcs := func(pass bool) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + return pass + }, + DeleteFunc: func(event.DeleteEvent) bool { + return pass + }, + UpdateFunc: func(event.UpdateEvent) bool { + return pass + }, + GenericFunc: func(event.GenericEvent) bool { + return pass + }, + } + } + passFuncs := funcs(true) + failFuncs := funcs(false) + + var injectFunc inject.Func + injectFunc = func(i interface{}) error { + _, err := inject.InjectorInto(injectFunc, i) + return err + } + + Describe("When checking an And predicate", func() { + It("should return false when one of its predicates returns false", func() { + a := predicate.And(passFuncs, failFuncs) + Expect(a.Create(event.CreateEvent{})).To(BeFalse()) + Expect(a.Update(event.UpdateEvent{})).To(BeFalse()) + Expect(a.Delete(event.DeleteEvent{})).To(BeFalse()) + Expect(a.Generic(event.GenericEvent{})).To(BeFalse()) + }) + It("should return true when all of its predicates return true", func() { + a := predicate.And(passFuncs, passFuncs) + Expect(a.Create(event.CreateEvent{})).To(BeTrue()) + 
Expect(a.Update(event.UpdateEvent{})).To(BeTrue()) + Expect(a.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(a.Generic(event.GenericEvent{})).To(BeTrue()) + }) + It("should inject into its predicates", func() { + prct := &injectablePredicate{} + a := predicate.And(prct) + Expect(injectFunc(a)).To(Succeed()) + Expect(prct.injected).To(BeTrue()) + }) + }) + Describe("When checking an Or predicate", func() { + It("should return true when one of its predicates returns true", func() { + o := predicate.Or(passFuncs, failFuncs) + Expect(o.Create(event.CreateEvent{})).To(BeTrue()) + Expect(o.Update(event.UpdateEvent{})).To(BeTrue()) + Expect(o.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(o.Generic(event.GenericEvent{})).To(BeTrue()) + }) + It("should return false when all of its predicates return false", func() { + o := predicate.Or(failFuncs, failFuncs) + Expect(o.Create(event.CreateEvent{})).To(BeFalse()) + Expect(o.Update(event.UpdateEvent{})).To(BeFalse()) + Expect(o.Delete(event.DeleteEvent{})).To(BeFalse()) + Expect(o.Generic(event.GenericEvent{})).To(BeFalse()) + }) + It("should inject into its predicates", func() { + prct := &injectablePredicate{} + a := predicate.Or(prct) + Expect(injectFunc(a)).To(Succeed()) + Expect(prct.injected).To(BeTrue()) + }) + }) + }) + + Describe("NewPredicateFuncs with a namespace filter function", func() { + byNamespaceFilter := func(namespace string) func(object client.Object) bool { + return func(object client.Object) bool { + return object.GetNamespace() == namespace + } + } + byNamespaceFuncs := predicate.NewPredicateFuncs(byNamespaceFilter("biz")) + Context("Where the namespace is matching", func() { + It("should return true", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + passEvt1 := event.UpdateEvent{ObjectOld: old, ObjectNew: new} + Expect(byNamespaceFuncs.Create(event.CreateEvent{Object: new})).To(BeTrue()) + Expect(byNamespaceFuncs.Delete(event.DeleteEvent{Object: old})).To(BeTrue()) + Expect(byNamespaceFuncs.Generic(event.GenericEvent{Object: new})).To(BeTrue()) + Expect(byNamespaceFuncs.Update(passEvt1)).To(BeTrue()) + }) + }) + + Context("Where the namespace is not matching", func() { + It("should return false", func() { + new := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "bizz", + }} + + old := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + failEvt1 := event.UpdateEvent{ObjectOld: old, ObjectNew: new} + Expect(byNamespaceFuncs.Create(event.CreateEvent{Object: new})).To(BeFalse()) + Expect(byNamespaceFuncs.Delete(event.DeleteEvent{Object: new})).To(BeFalse()) + Expect(byNamespaceFuncs.Generic(event.GenericEvent{Object: new})).To(BeFalse()) + Expect(byNamespaceFuncs.Update(failEvt1)).To(BeFalse()) + }) + }) + }) + + Describe("When checking a LabelSelectorPredicate", func() { + instance, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}) + if err != nil { + Fail("Improper Label Selector passed during predicate instantiation.") + } + + Context("When the Selector does not match the event labels", func() { + It("should return false", func() { + failMatch := &corev1.Pod{} + Expect(instance.Create(event.CreateEvent{Object: failMatch})).To(BeFalse()) + Expect(instance.Delete(event.DeleteEvent{Object: failMatch})).To(BeFalse()) + 
Expect(instance.Generic(event.GenericEvent{Object: failMatch})).To(BeFalse()) + Expect(instance.Update(event.UpdateEvent{ObjectNew: failMatch})).To(BeFalse()) + }) + }) + + Context("When the Selector matches the event labels", func() { + It("should return true", func() { + successMatch := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + } + Expect(instance.Create(event.CreateEvent{Object: successMatch})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{Object: successMatch})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{Object: successMatch})).To(BeTrue()) + Expect(instance.Update(event.UpdateEvent{ObjectNew: successMatch})).To(BeTrue()) + }) + }) + }) +}) + +type injectablePredicate struct { + injected bool + predicate.Funcs +} + +func (i *injectablePredicate) InjectFunc(f inject.Func) error { + i.injected = true + return nil +} diff --git a/pkg/ratelimiter/doc.go b/pkg/ratelimiter/doc.go new file mode 100644 index 0000000000..a01d603fe5 --- /dev/null +++ b/pkg/ratelimiter/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ratelimiter defines rate limiters used by Controllers to limit how frequently requests may be queued. + +Typical rate limiters that can be used are implemented in client-go's workqueue package. +*/ +package ratelimiter diff --git a/pkg/ratelimiter/ratelimiter.go b/pkg/ratelimiter/ratelimiter.go new file mode 100644 index 0000000000..565a3a227f --- /dev/null +++ b/pkg/ratelimiter/ratelimiter.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ratelimiter + +import "time" + +// RateLimiter is identical to client-go's workqueue.RateLimiter interface. +type RateLimiter interface { + // When takes an item and decides how long that item should wait before being processed + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. Whether the item permanently failed + // or succeeded, we stop tracking it + Forget(item interface{}) + // NumRequeues returns how many failures the item has had + NumRequeues(item interface{}) int +} diff --git a/pkg/reconcile/doc.go b/pkg/reconcile/doc.go new file mode 100644 index 0000000000..d221dd7b3f --- /dev/null +++ b/pkg/reconcile/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package reconcile defines the Reconciler interface to implement Kubernetes APIs. Reconciler is provided +to Controllers at creation time as the API implementation. +*/ +package reconcile diff --git a/pkg/reconcile/example_test.go b/pkg/reconcile/example_test.go new file mode 100644 index 0000000000..2b799df90d --- /dev/null +++ b/pkg/reconcile/example_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile_test + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// This example implements a simple no-op reconcile function that prints the object to be Reconciled. +func ExampleFunc() { + r := reconcile.Func(func(_ context.Context, o reconcile.Request) (reconcile.Result, error) { + // Create your business logic to create, update, delete objects here. + fmt.Printf("Name: %s, Namespace: %s", o.Name, o.Namespace) + return reconcile.Result{}, nil + }) + + res, err := r.Reconcile(context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "test"}}) + if err != nil || res.Requeue || res.RequeueAfter != time.Duration(0) { + fmt.Printf("got requeue request: %v, %v\n", err, res) + } + + // Output: Name: test, Namespace: default +} diff --git a/pkg/reconcile/reconcile.go b/pkg/reconcile/reconcile.go new file mode 100644 index 0000000000..8285e2ca9b --- /dev/null +++ b/pkg/reconcile/reconcile.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/types" +) + +// Result contains the result of a Reconciler invocation. +type Result struct { + // Requeue tells the Controller to requeue the reconcile key. Defaults to false. + Requeue bool + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. 
+ // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + RequeueAfter time.Duration +} + +// IsZero returns true if this result is empty. +func (r *Result) IsZero() bool { + if r == nil { + return true + } + return *r == Result{} +} + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request struct { + // NamespacedName is the name and namespace of the object to reconcile. + types.NamespacedName +} + +/* +Reconciler implements a Kubernetes API for a specific Resource by Creating, Updating or Deleting Kubernetes +objects, or by making changes to systems external to the cluster (e.g. cloudproviders, github, etc). + +reconcile implementations compare the state specified in an object by a user against the actual cluster state, +and then perform operations to make the actual cluster state reflect the state specified by the user. + +Typically, reconcile is triggered by a Controller in response to cluster Events (e.g. Creating, Updating, +Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling external sources, etc). + +Example reconcile Logic: + +* Read an object and all the Pods it owns. +* Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica. +* Create 4 Pods and set their OwnerReferences to the object. + +reconcile may be implemented as either a type: + + type reconciler struct {} + + func (reconciler) Reconcile(ctx context.Context, o reconcile.Request) (reconcile.Result, error) { + // Implement business logic of reading and writing objects here + return reconcile.Result{}, nil + } + +Or as a function: + + reconcile.Func(func(ctx context.Context, o reconcile.Request) (reconcile.Result, error) { + // Implement business logic of reading and writing objects here + return reconcile.Result{}, nil + }) + +Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is +driven by actual cluster state read from the apiserver or a local cache. +For example if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted, +instead the reconcile function observes this when reading the cluster state and seeing the Pod as missing. +*/ +type Reconciler interface { + // Reconcile performs a full reconciliation for the object referred to by the Request. + // The Controller will requeue the Request to be processed again if an error is non-nil or + // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. + Reconcile(context.Context, Request) (Result, error) +} + +// Func is a function that implements the reconcile interface. +type Func func(context.Context, Request) (Result, error) + +var _ Reconciler = Func(nil) + +// Reconcile implements Reconciler. +func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } diff --git a/pkg/reconcile/reconcile_suite_test.go b/pkg/reconcile/reconcile_suite_test.go new file mode 100644 index 0000000000..179fb10de4 --- /dev/null +++ b/pkg/reconcile/reconcile_suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestReconcile(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Reconcile Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/reconcile/reconcile_test.go b/pkg/reconcile/reconcile_test.go new file mode 100644 index 0000000000..26924c8fa9 --- /dev/null +++ b/pkg/reconcile/reconcile_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile_test + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("reconcile", func() { + Describe("Result", func() { + It("IsZero should return true if empty", func() { + var res *reconcile.Result + Expect(res.IsZero()).To(BeTrue()) + res2 := &reconcile.Result{} + Expect(res2.IsZero()).To(BeTrue()) + res3 := reconcile.Result{} + Expect(res3.IsZero()).To(BeTrue()) + }) + + It("IsZero should return false if Requeue is set to true", func() { + res := reconcile.Result{Requeue: true} + Expect(res.IsZero()).To(BeFalse()) + }) + + It("IsZero should return false if RequeueAfter is set to true", func() { + res := reconcile.Result{RequeueAfter: 1 * time.Second} + Expect(res.IsZero()).To(BeFalse()) + }) + }) + + Describe("Func", func() { + It("should call the function with the request and return a nil error.", func() { + request := reconcile.Request{ + NamespacedName: types.NamespacedName{Name: "foo", Namespace: "bar"}, + } + result := reconcile.Result{ + Requeue: true, + } + + instance := reconcile.Func(func(_ context.Context, r reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + Expect(r).To(Equal(request)) + + return result, nil + }) + actualResult, actualErr := instance.Reconcile(context.Background(), request) + Expect(actualResult).To(Equal(result)) + Expect(actualErr).NotTo(HaveOccurred()) + }) + + It("should call the function with the request and return an error.", func() { + request := reconcile.Request{ + NamespacedName: types.NamespacedName{Name: "foo", Namespace: "bar"}, + } + result := reconcile.Result{ + Requeue: false, + } + err := fmt.Errorf("hello world") + + instance := reconcile.Func(func(_ context.Context, r reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + Expect(r).To(Equal(request)) + + return result, err + }) + actualResult, actualErr := instance.Reconcile(context.Background(), request) + Expect(actualResult).To(Equal(result)) + Expect(actualErr).To(Equal(err)) + }) + }) +}) diff --git a/pkg/recorder/example_test.go b/pkg/recorder/example_test.go new file mode 100644 index 0000000000..cf1beb40c8 --- /dev/null +++ b/pkg/recorder/example_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package recorder_test + +import ( + corev1 "k8s.io/api/core/v1" + + "sigs.k8s.io/controller-runtime/pkg/recorder" +) + +var ( + recorderProvider recorder.Provider + somePod *corev1.Pod // the object you're reconciling, for example +) + +func Example_event() { + // recorderProvider is a recorder.Provider + recorder := recorderProvider.GetEventRecorderFor("my-controller") + + // emit an event with a fixed message + recorder.Event(somePod, corev1.EventTypeWarning, + "WrongTrousers", "It's the wrong trousers, Gromit!") +} + +func Example_eventf() { + // recorderProvider is a recorder.Provider + recorder := recorderProvider.GetEventRecorderFor("my-controller") + + // emit an event with a variable message + mildCheese := "Wensleydale" + recorder.Eventf(somePod, corev1.EventTypeNormal, + "DislikesCheese", "Not even %s?", mildCheese) +} diff --git a/pkg/recorder/recorder.go b/pkg/recorder/recorder.go new file mode 100644 index 0000000000..f093f0a726 --- /dev/null +++ b/pkg/recorder/recorder.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package recorder defines interfaces for working with Kubernetes event recorders. +// +// You can use these to emit Kubernetes events associated with a particular Kubernetes +// object. +package recorder + +import ( + "k8s.io/client-go/tools/record" +) + +// Provider knows how to generate new event recorders with given name. +type Provider interface { + // NewRecorder returns an EventRecorder with given name. + GetEventRecorderFor(name string) record.EventRecorder +} diff --git a/pkg/runtime/doc.go b/pkg/runtime/doc.go new file mode 100644 index 0000000000..34101b3fa4 --- /dev/null +++ b/pkg/runtime/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime contains not-quite-internal mechanisms for +// controller-runtime, plus some deprecated exports of functionality +// moved elsewhere. Most users should not need to import anything in +// pkg/runtime. +package runtime diff --git a/pkg/runtime/inject/doc.go b/pkg/runtime/inject/doc.go new file mode 100644 index 0000000000..17c60895f0 --- /dev/null +++ b/pkg/runtime/inject/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to +the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate +objects which implement the Injectable interfaces. +*/ +package inject diff --git a/pkg/runtime/inject/inject.go b/pkg/runtime/inject/inject.go new file mode 100644 index 0000000000..c8c56ba817 --- /dev/null +++ b/pkg/runtime/inject/inject.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. +// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. +package inject + +import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and +// Reconciles. +type Cache interface { + InjectCache(cache cache.Cache) error +} + +// CacheInto will set informers on i and return the result if it implements Cache. Returns +// false if i does not implement Cache. +func CacheInto(c cache.Cache, i interface{}) (bool, error) { + if s, ok := i.(Cache); ok { + return true, s.InjectCache(c) + } + return false, nil +} + +// APIReader is used by the Manager to inject the APIReader into necessary types. +type APIReader interface { + InjectAPIReader(client.Reader) error +} + +// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. +// Returns false if i does not implement APIReader. +func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { + if s, ok := i.(APIReader); ok { + return true, s.InjectAPIReader(reader) + } + return false, nil +} + +// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and +// Reconciles. +type Config interface { + InjectConfig(*rest.Config) error +} + +// ConfigInto will set config on i and return the result if it implements Config. Returns +// false if i does not implement Config. +func ConfigInto(config *rest.Config, i interface{}) (bool, error) { + if s, ok := i.(Config); ok { + return true, s.InjectConfig(config) + } + return false, nil +} + +// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and +// Reconciles. 
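+//
+// A component opts in by implementing InjectClient. For example (an illustrative sketch, not
+// part of this package):
+//
+//	type podAnnotator struct{ client client.Client }
+//
+//	func (a *podAnnotator) InjectClient(c client.Client) error { a.client = c; return nil }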
+type Client interface { + InjectClient(client.Client) error +} + +// ClientInto will set client on i and return the result if it implements Client. Returns +// false if i does not implement Client. +func ClientInto(client client.Client, i interface{}) (bool, error) { + if s, ok := i.(Client); ok { + return true, s.InjectClient(client) + } + return false, nil +} + +// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and +// Reconciles. +type Scheme interface { + InjectScheme(scheme *runtime.Scheme) error +} + +// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns +// false if i does not implement Scheme. +func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { + if is, ok := i.(Scheme); ok { + return true, is.InjectScheme(scheme) + } + return false, nil +} + +// Stoppable is used by the ControllerManager to inject stop channel into Sources, +// EventHandlers, Predicates, and Reconciles. +type Stoppable interface { + InjectStopChannel(<-chan struct{}) error +} + +// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. +// Returns false if i does not implement Stoppable. +func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { + if s, ok := i.(Stoppable); ok { + return true, s.InjectStopChannel(stop) + } + return false, nil +} + +// Mapper is used to inject the rest mapper to components that may need it. +type Mapper interface { + InjectMapper(meta.RESTMapper) error +} + +// MapperInto will set the rest mapper on i and return the result if it implements Mapper. +// Returns false if i does not implement Mapper. +func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { + if m, ok := i.(Mapper); ok { + return true, m.InjectMapper(mapper) + } + return false, nil +} + +// Func injects dependencies into i. +type Func func(i interface{}) error + +// Injector is used by the ControllerManager to inject Func into Controllers. +type Injector interface { + InjectFunc(f Func) error +} + +// InjectorInto will set f and return the result on i if it implements Injector. Returns +// false if i does not implement Injector. +func InjectorInto(f Func, i interface{}) (bool, error) { + if ii, ok := i.(Injector); ok { + return true, ii.InjectFunc(f) + } + return false, nil +} + +// Logger is used to inject Loggers into components that need them +// and don't otherwise have opinions. +type Logger interface { + InjectLogger(l logr.Logger) error +} + +// LoggerInto will set the logger on the given object if it implements inject.Logger, +// returning true if a InjectLogger was called, and false otherwise. +func LoggerInto(l logr.Logger, i interface{}) (bool, error) { + if injectable, wantsLogger := i.(Logger); wantsLogger { + return true, injectable.InjectLogger(l) + } + return false, nil +} diff --git a/pkg/runtime/inject/inject_suite_test.go b/pkg/runtime/inject/inject_suite_test.go new file mode 100644 index 0000000000..98cf79ab3b --- /dev/null +++ b/pkg/runtime/inject/inject_suite_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inject + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Runtime Injection Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/runtime/inject/inject_test.go b/pkg/runtime/inject/inject_test.go new file mode 100644 index 0000000000..bffc34ec27 --- /dev/null +++ b/pkg/runtime/inject/inject_test.go @@ -0,0 +1,331 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inject + +import ( + "fmt" + "reflect" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var instance *testSource +var uninjectable *failSource +var errInjectFail = fmt.Errorf("injection fails") +var expectedFalse = false + +var _ = Describe("runtime inject", func() { + + BeforeEach(func() { + instance = &testSource{} + uninjectable = &failSource{} + }) + + It("should set informers", func() { + injectedCache := &informertest.FakeInformers{} + + By("Validating injecting the informer") + res, err := CacheInto(injectedCache, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(injectedCache).To(Equal(instance.GetCache())) + + By("Returning false if the type does not implement inject.Cache") + res, err = CacheInto(injectedCache, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(expectedFalse)) + Expect(uninjectable.GetCache()).To(BeNil()) + + By("Returning an error if informer injection fails") + res, err = CacheInto(nil, instance) + Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + + }) + + It("should set config", func() { + + cfg := &rest.Config{} + + By("Validating injecting config") + res, err := ConfigInto(cfg, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(cfg).To(Equal(instance.GetConfig())) + + By("Returning false if the type does not implement inject.Config") + res, err = ConfigInto(cfg, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(false)) + Expect(uninjectable.GetConfig()).To(BeNil()) + + By("Returning an error if config injection fails") + res, err = ConfigInto(nil, instance) + 
Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + }) + + It("should set client", func() { + client, err := client.NewDelegatingClient(client.NewDelegatingClientInput{Client: fake.NewClientBuilder().Build()}) + Expect(err).NotTo(HaveOccurred()) + + By("Validating injecting client") + res, err := ClientInto(client, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(client).To(Equal(instance.GetClient())) + + By("Returning false if the type does not implement inject.Client") + res, err = ClientInto(client, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(false)) + Expect(uninjectable.GetClient()).To(BeNil()) + + By("Returning an error if client injection fails") + res, err = ClientInto(nil, instance) + Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + }) + + It("should set scheme", func() { + + scheme := runtime.NewScheme() + + By("Validating injecting scheme") + res, err := SchemeInto(scheme, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(scheme).To(Equal(instance.GetScheme())) + + By("Returning false if the type does not implement inject.Scheme") + res, err = SchemeInto(scheme, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(false)) + Expect(uninjectable.GetScheme()).To(BeNil()) + + By("Returning an error if scheme injection fails") + res, err = SchemeInto(nil, instance) + Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + }) + + It("should set stop channel", func() { + + stop := make(<-chan struct{}) + + By("Validating injecting stop channel") + res, err := StopChannelInto(stop, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(stop).To(Equal(instance.GetStop())) + + By("Returning false if the type does not implement inject.Stoppable") + res, err = StopChannelInto(stop, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(false)) + Expect(uninjectable.GetStop()).To(BeNil()) + + By("Returning an error if stop channel injection fails") + res, err = StopChannelInto(nil, instance) + Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + }) + + It("should set api reader", func() { + apiReader, err := client.NewDelegatingClient(client.NewDelegatingClientInput{Client: fake.NewClientBuilder().Build()}) + Expect(err).NotTo(HaveOccurred()) + + By("Validating injecting client") + res, err := APIReaderInto(apiReader, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(apiReader).To(Equal(instance.GetAPIReader())) + + By("Returning false if the type does not implement inject.Client") + res, err = APIReaderInto(apiReader, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(false)) + Expect(uninjectable.GetAPIReader()).To(BeNil()) + + By("Returning an error if client injection fails") + res, err = APIReaderInto(nil, instance) + Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + }) + + It("should set dependencies", func() { + + f := func(interface{}) error { return nil } + + By("Validating injecting dependencies") + res, err := InjectorInto(f, instance) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(true)) + Expect(reflect.ValueOf(f).Pointer()).To(Equal(reflect.ValueOf(instance.GetFunc()).Pointer())) + + By("Returning false if the type does not implement inject.Injector") + res, err = InjectorInto(f, uninjectable) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal(false)) + 
Expect(uninjectable.GetFunc()).To(BeNil()) + + By("Returning an error if dependencies injection fails") + res, err = InjectorInto(nil, instance) + Expect(err).To(Equal(errInjectFail)) + Expect(res).To(Equal(true)) + }) + +}) + +type testSource struct { + scheme *runtime.Scheme + cache cache.Cache + config *rest.Config + client client.Client + apiReader client.Reader + f Func + stop <-chan struct{} +} + +func (s *testSource) InjectCache(c cache.Cache) error { + if c != nil { + s.cache = c + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) InjectConfig(config *rest.Config) error { + if config != nil { + s.config = config + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) InjectClient(client client.Client) error { + if client != nil { + s.client = client + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) InjectScheme(scheme *runtime.Scheme) error { + if scheme != nil { + s.scheme = scheme + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) InjectStopChannel(stop <-chan struct{}) error { + if stop != nil { + s.stop = stop + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) InjectAPIReader(reader client.Reader) error { + if reader != nil { + s.apiReader = reader + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) InjectFunc(f Func) error { + if f != nil { + s.f = f + return nil + } + return fmt.Errorf("injection fails") +} + +func (s *testSource) GetCache() cache.Cache { + return s.cache +} + +func (s *testSource) GetConfig() *rest.Config { + return s.config +} + +func (s *testSource) GetScheme() *runtime.Scheme { + return s.scheme +} + +func (s *testSource) GetClient() client.Client { + return s.client +} + +func (s *testSource) GetAPIReader() client.Reader { + return s.apiReader +} + +func (s *testSource) GetFunc() Func { + return s.f +} + +func (s *testSource) GetStop() <-chan struct{} { + return s.stop +} + +type failSource struct { + scheme *runtime.Scheme + cache cache.Cache + config *rest.Config + client client.Client + apiReader client.Reader + f Func + stop <-chan struct{} +} + +func (s *failSource) GetCache() cache.Cache { + return s.cache +} + +func (s *failSource) GetConfig() *rest.Config { + return s.config +} + +func (s *failSource) GetScheme() *runtime.Scheme { + return s.scheme +} + +func (s *failSource) GetClient() client.Client { + return s.client +} + +func (s *failSource) GetAPIReader() client.Reader { + return s.apiReader +} + +func (s *failSource) GetFunc() Func { + return s.f +} + +func (s *failSource) GetStop() <-chan struct{} { + return s.stop +} diff --git a/pkg/scheme/scheme.go b/pkg/scheme/scheme.go new file mode 100644 index 0000000000..55ebe21773 --- /dev/null +++ b/pkg/scheme/scheme.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package scheme contains utilities for gradually building Schemes, +// which contain information associating Go types with Kubernetes +// groups, versions, and kinds. +// +// Each API group should define a utility function +// called AddToScheme for adding its types to a Scheme: +// +// // in package myapigroupv1... +// var ( +// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} +// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// AddToScheme = SchemeBuilder.AddToScheme +// ) +// +// func init() { +// SchemeBuilder.Register(&MyType{}, &MyTypeList) +// } +// var ( +// scheme *runtime.Scheme = runtime.NewScheme() +// ) +// +// This also true of the built-in Kubernetes types. Then, in the entrypoint for +// your manager, assemble the scheme containing exactly the types you need, +// panicing if scheme registration failed. For instance, if our controller needs +// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1: +// +// func init() { +// utilruntime.Must(myapigroupv1.AddToScheme(scheme)) +// utilruntime.Must(kubernetesscheme.AddToScheme(scheme)) +// } +// +// func main() { +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ +// Scheme: scheme, +// }) +// // ... +// } +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type Builder struct { + GroupVersion schema.GroupVersion + runtime.SchemeBuilder +} + +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +func (bld *Builder) Register(object ...runtime.Object) *Builder { + bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(bld.GroupVersion, object...) + metav1.AddToGroupVersion(scheme, bld.GroupVersion) + return nil + }) + return bld +} + +// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld. +func (bld *Builder) RegisterAll(b *Builder) *Builder { + bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...) + return bld +} + +// AddToScheme adds all registered types to s. +func (bld *Builder) AddToScheme(s *runtime.Scheme) error { + return bld.SchemeBuilder.AddToScheme(s) +} + +// Build returns a new Scheme containing the registered types. +func (bld *Builder) Build() (*runtime.Scheme, error) { + s := runtime.NewScheme() + return s, bld.AddToScheme(s) +} diff --git a/pkg/scheme/scheme_suite_test.go b/pkg/scheme/scheme_suite_test.go new file mode 100644 index 0000000000..a11e08fa5c --- /dev/null +++ b/pkg/scheme/scheme_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +func TestScheme(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Scheme Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} diff --git a/pkg/scheme/scheme_test.go b/pkg/scheme/scheme_test.go new file mode 100644 index 0000000000..72f083ad4b --- /dev/null +++ b/pkg/scheme/scheme_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme_test + +import ( + "reflect" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var _ = Describe("Scheme", func() { + Describe("Builder", func() { + It("should provide a Scheme with the types registered", func() { + gv := schema.GroupVersion{Group: "core", Version: "v1"} + + s, err := (&scheme.Builder{GroupVersion: gv}). + Register(&corev1.Pod{}, &corev1.PodList{}). + Build() + Expect(err).NotTo(HaveOccurred()) + + internalGv := schema.GroupVersion{Group: "core", Version: "__internal"} + emptyGv := schema.GroupVersion{Group: "", Version: "v1"} + Expect(s.AllKnownTypes()).To(MatchAllKeys(Keys{ + gv.WithKind("Pod"): Equal(reflect.TypeOf(corev1.Pod{})), + gv.WithKind("PodList"): Equal(reflect.TypeOf(corev1.PodList{})), + + // Base types + gv.WithKind("CreateOptions"): Ignore(), + gv.WithKind("UpdateOptions"): Ignore(), + gv.WithKind("PatchOptions"): Ignore(), + gv.WithKind("DeleteOptions"): Ignore(), + gv.WithKind("GetOptions"): Ignore(), + gv.WithKind("ListOptions"): Ignore(), + gv.WithKind("WatchEvent"): Ignore(), + + internalGv.WithKind("WatchEvent"): Ignore(), + + emptyGv.WithKind("APIGroup"): Ignore(), + emptyGv.WithKind("APIGroupList"): Ignore(), + emptyGv.WithKind("APIResourceList"): Ignore(), + emptyGv.WithKind("APIVersions"): Ignore(), + emptyGv.WithKind("Status"): Ignore(), + })) + }) + + It("should be able to add types from other Builders", func() { + gv1 := schema.GroupVersion{Group: "core", Version: "v1"} + b1 := (&scheme.Builder{GroupVersion: gv1}).Register(&corev1.Pod{}, &corev1.PodList{}) + + gv2 := schema.GroupVersion{Group: "apps", Version: "v1"} + s, err := (&scheme.Builder{GroupVersion: gv2}). + Register(&appsv1.Deployment{}). + Register(&appsv1.DeploymentList{}). + RegisterAll(b1). 
+ Build() + + Expect(err).NotTo(HaveOccurred()) + internalGv1 := schema.GroupVersion{Group: "core", Version: "__internal"} + internalGv2 := schema.GroupVersion{Group: "apps", Version: "__internal"} + emptyGv := schema.GroupVersion{Group: "", Version: "v1"} + Expect(s.AllKnownTypes()).To(MatchAllKeys(Keys{ + // Types from b1 + gv1.WithKind("Pod"): Equal(reflect.TypeOf(corev1.Pod{})), + gv1.WithKind("PodList"): Equal(reflect.TypeOf(corev1.PodList{})), + + // Types from b2 + gv2.WithKind("Deployment"): Equal(reflect.TypeOf(appsv1.Deployment{})), + gv2.WithKind("DeploymentList"): Equal(reflect.TypeOf(appsv1.DeploymentList{})), + + // Base types + gv1.WithKind("CreateOptions"): Ignore(), + gv1.WithKind("UpdateOptions"): Ignore(), + gv1.WithKind("PatchOptions"): Ignore(), + gv1.WithKind("DeleteOptions"): Ignore(), + gv1.WithKind("GetOptions"): Ignore(), + gv1.WithKind("ListOptions"): Ignore(), + gv1.WithKind("WatchEvent"): Ignore(), + + internalGv1.WithKind("WatchEvent"): Ignore(), + + gv2.WithKind("CreateOptions"): Ignore(), + gv2.WithKind("UpdateOptions"): Ignore(), + gv2.WithKind("PatchOptions"): Ignore(), + gv2.WithKind("DeleteOptions"): Ignore(), + gv2.WithKind("GetOptions"): Ignore(), + gv2.WithKind("ListOptions"): Ignore(), + gv2.WithKind("WatchEvent"): Ignore(), + + internalGv2.WithKind("WatchEvent"): Ignore(), + + emptyGv.WithKind("APIGroup"): Ignore(), + emptyGv.WithKind("APIGroupList"): Ignore(), + emptyGv.WithKind("APIResourceList"): Ignore(), + emptyGv.WithKind("APIVersions"): Ignore(), + emptyGv.WithKind("Status"): Ignore(), + })) + }) + }) +}) diff --git a/pkg/source/doc.go b/pkg/source/doc.go new file mode 100644 index 0000000000..31935c83c1 --- /dev/null +++ b/pkg/source/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package source provides event streams to hook up to Controllers with Controller.Watch. Events are +used with handler.EventHandlers to enqueue reconcile.Requests and trigger Reconciles for Kubernetes +objects. +*/ +package source diff --git a/pkg/source/example_test.go b/pkg/source/example_test.go new file mode 100644 index 0000000000..d306eaf583 --- /dev/null +++ b/pkg/source/example_test.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source_test + +import ( + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var ctrl controller.Controller + +// This example Watches for Pod Events (e.g. Create / Update / Delete) and enqueues a reconcile.Request +// with the Name and Namespace of the Pod. +func ExampleKind() { + err := ctrl.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + // handle it + } +} + +// This example reads GenericEvents from a channel and enqueues a reconcile.Request containing the Name and Namespace +// provided by the event. +func ExampleChannel() { + events := make(chan event.GenericEvent) + + err := ctrl.Watch( + &source.Channel{Source: events}, + &handler.EnqueueRequestForObject{}, + ) + if err != nil { + // handle it + } +} diff --git a/pkg/source/internal/eventsource.go b/pkg/source/internal/eventsource.go new file mode 100644 index 0000000000..f0cfe212ed --- /dev/null +++ b/pkg/source/internal/eventsource.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") + +var _ cache.ResourceEventHandler = EventHandler{} + +// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. +type EventHandler struct { + EventHandler handler.EventHandler + Queue workqueue.RateLimitingInterface + Predicates []predicate.Predicate +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. +func (e EventHandler) OnAdd(obj interface{}) { + c := event.CreateEvent{} + + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { + c.Object = o + } else { + log.Error(nil, "OnAdd missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + for _, p := range e.Predicates { + if !p.Create(c) { + return + } + } + + // Invoke create handler + e.EventHandler.Create(c, e.Queue) +} + +// OnUpdate creates UpdateEvent and calls Update on EventHandler. 
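+// If either the old or the new object does not implement client.Object, the event is logged
+// and dropped before any predicate or the handler is invoked.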
+func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { + u := event.UpdateEvent{} + + if o, ok := oldObj.(client.Object); ok { + u.ObjectOld = o + } else { + log.Error(nil, "OnUpdate missing ObjectOld", + "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) + return + } + + // Pull Object out of the object + if o, ok := newObj.(client.Object); ok { + u.ObjectNew = o + } else { + log.Error(nil, "OnUpdate missing ObjectNew", + "object", newObj, "type", fmt.Sprintf("%T", newObj)) + return + } + + for _, p := range e.Predicates { + if !p.Update(u) { + return + } + } + + // Invoke update handler + e.EventHandler.Update(u, e.Queue) +} + +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e EventHandler) OnDelete(obj interface{}) { + d := event.DeleteEvent{} + + // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a + // DeleteFinalStateUnknown struct, so the object needs to be pulled out. + // Copied from sample-controller + // This should never happen if we aren't missing events, which we have concluded that we are not + // and made decisions off of this belief. Maybe this shouldn't be here? + var ok bool + if _, ok = obj.(client.Object); !ok { + // If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + log.Error(nil, "Error decoding objects. Expected cache.DeletedFinalStateUnknown", + "type", fmt.Sprintf("%T", obj), + "object", obj) + return + } + + // Set obj to the tombstone obj + obj = tombstone.Obj + } + + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { + d.Object = o + } else { + log.Error(nil, "OnDelete missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + for _, p := range e.Predicates { + if !p.Delete(d) { + return + } + } + + // Invoke delete handler + e.EventHandler.Delete(d, e.Queue) +} diff --git a/pkg/source/internal/internal_suite_test.go b/pkg/source/internal/internal_suite_test.go new file mode 100644 index 0000000000..21dd5ee6b4 --- /dev/null +++ b/pkg/source/internal/internal_suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestInternal(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Source Internal Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/source/internal/internal_test.go b/pkg/source/internal/internal_test.go new file mode 100644 index 0000000000..9b96c6d46d --- /dev/null +++ b/pkg/source/internal/internal_test.go @@ -0,0 +1,318 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source/internal" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var _ = Describe("Internal", func() { + + var instance internal.EventHandler + var funcs, setfuncs *handler.Funcs + var set bool + BeforeEach(func() { + funcs = &handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect CreateEvent to be called.") + }, + DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect DeleteEvent to be called.") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect UpdateEvent to be called.") + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Did not expect GenericEvent to be called.") + }, + } + + setfuncs = &handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + set = true + }, + DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + set = true + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + set = true + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + set = true + }, + } + instance = internal.EventHandler{ + Queue: controllertest.Queue{}, + EventHandler: funcs, + } + }) + + Describe("EventHandler", func() { + var pod, newPod *corev1.Pod + + BeforeEach(func() { + pod = &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + }, + } + newPod = pod.DeepCopy() + newPod.Labels = map[string]string{"foo": "bar"} + }) + + It("should create a CreateEvent", func() 
{ + funcs.CreateFunc = func(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q).To(Equal(instance.Queue)) + Expect(evt.Object).To(Equal(pod)) + } + instance.OnAdd(pod) + }) + + It("should used Predicates to filter CreateEvents", func() { + instance = internal.EventHandler{ + Queue: controllertest.Queue{}, + EventHandler: setfuncs, + } + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, + } + instance.OnAdd(pod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + } + instance.OnAdd(pod) + Expect(set).To(BeTrue()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, + } + instance.OnAdd(pod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + } + instance.OnAdd(pod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + } + instance.OnAdd(pod) + Expect(set).To(BeTrue()) + }) + + It("should not call Create EventHandler if the object is not a runtime.Object", func() { + instance.OnAdd(&metav1.ObjectMeta{}) + }) + + It("should not call Create EventHandler if the object does not have metadata", func() { + instance.OnAdd(FooRuntimeObject{}) + }) + + It("should create an UpdateEvent", func() { + funcs.UpdateFunc = func(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q).To(Equal(instance.Queue)) + + Expect(evt.ObjectOld).To(Equal(pod)) + Expect(evt.ObjectNew).To(Equal(newPod)) + } + instance.OnUpdate(pod, newPod) + }) + + It("should used Predicates to filter UpdateEvents", func() { + instance = internal.EventHandler{ + Queue: controllertest.Queue{}, + EventHandler: setfuncs, + } + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{UpdateFunc: func(updateEvent event.UpdateEvent) bool { return false }}, + } + instance.OnUpdate(pod, newPod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, + } + instance.OnUpdate(pod, newPod) + Expect(set).To(BeTrue()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, + predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return false }}, + } + instance.OnUpdate(pod, newPod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return false }}, + predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, + } + instance.OnUpdate(pod, newPod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, + } + 
instance.OnUpdate(pod, newPod) + Expect(set).To(BeTrue()) + }) + + It("should not call Update EventHandler if the object is not a runtime.Object", func() { + instance.OnUpdate(&metav1.ObjectMeta{}, &corev1.Pod{}) + instance.OnUpdate(&corev1.Pod{}, &metav1.ObjectMeta{}) + }) + + It("should not call Update EventHandler if the object does not have metadata", func() { + instance.OnUpdate(FooRuntimeObject{}, &corev1.Pod{}) + instance.OnUpdate(&corev1.Pod{}, FooRuntimeObject{}) + }) + + It("should create a DeleteEvent", func() { + funcs.DeleteFunc = func(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q).To(Equal(instance.Queue)) + + Expect(evt.Object).To(Equal(pod)) + } + instance.OnDelete(pod) + }) + + It("should used Predicates to filter DeleteEvents", func() { + instance = internal.EventHandler{ + Queue: controllertest.Queue{}, + EventHandler: setfuncs, + } + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, + } + instance.OnDelete(pod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, + } + instance.OnDelete(pod) + Expect(set).To(BeTrue()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, + } + instance.OnDelete(pod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, + } + instance.OnDelete(pod) + Expect(set).To(BeFalse()) + + set = false + instance.Predicates = []predicate.Predicate{ + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, + predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, + } + instance.OnDelete(pod) + Expect(set).To(BeTrue()) + }) + + It("should not call Delete EventHandler if the object is not a runtime.Object", func() { + instance.OnDelete(&metav1.ObjectMeta{}) + }) + + It("should not call Delete EventHandler if the object does not have metadata", func() { + instance.OnDelete(FooRuntimeObject{}) + }) + + It("should create a DeleteEvent from a tombstone", func() { + + tombstone := cache.DeletedFinalStateUnknown{ + Obj: pod, + } + funcs.DeleteFunc = func(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q).To(Equal(instance.Queue)) + Expect(evt.Object).To(Equal(pod)) + } + + instance.OnDelete(tombstone) + }) + + It("should ignore tombstone objects without meta", func() { + tombstone := cache.DeletedFinalStateUnknown{Obj: Foo{}} + instance.OnDelete(tombstone) + }) + It("should ignore objects without meta", func() { + instance.OnAdd(Foo{}) + instance.OnUpdate(Foo{}, Foo{}) + instance.OnDelete(Foo{}) + }) + }) +}) + +type Foo struct{} + +var _ runtime.Object = FooRuntimeObject{} + +type FooRuntimeObject struct{} + +func (FooRuntimeObject) GetObjectKind() schema.ObjectKind { return nil } +func (FooRuntimeObject) DeepCopyObject() runtime.Object { return nil } diff --git a/pkg/source/source.go b/pkg/source/source.go new file mode 100644 index 0000000000..241c582eff --- /dev/null +++ b/pkg/source/source.go @@ -0,0 +1,375 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package source
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+	"sigs.k8s.io/controller-runtime/pkg/source/internal"
+
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+var log = logf.RuntimeLog.WithName("source")
+
+const (
+	// defaultBufferSize is the default number of event notifications that can be buffered.
+	defaultBufferSize = 1024
+)
+
+// Source is a source of events (e.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc.)
+// which should be processed by event.EventHandlers to enqueue reconcile.Requests.
+//
+// * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update).
+//
+// * Use Channel for events originating outside the cluster (e.g. GitHub Webhook callback, polling external URLs).
+//
+// Users may build their own Source implementations. If their implementations implement any of the inject package
+// interfaces, the dependencies will be injected by the Controller when Watch is called.
+type Source interface {
+	// Start is internal and should be called only by the Controller to register an EventHandler with the Informer
+	// to enqueue reconcile.Requests.
+	Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error
+}
+
+// SyncingSource is a source that needs syncing prior to being usable. The controller
+// will call its WaitForSync prior to starting workers.
+type SyncingSource interface {
+	Source
+	WaitForSync(ctx context.Context) error
+}
+
+// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used
+// and not overwritten. It can be used to watch objects in a different cluster by passing the cache
+// from that other cluster.
+func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource {
+	return &kindWithCache{kind: Kind{Type: object, cache: cache}}
+}
+
+type kindWithCache struct {
+	kind Kind
+}
+
+func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
+	prct ...predicate.Predicate) error {
+	return ks.kind.Start(ctx, handler, queue, prct...)
+}
+
+func (ks *kindWithCache) WaitForSync(ctx context.Context) error {
+	return ks.kind.WaitForSync(ctx)
+}
+
+// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
+type Kind struct {
+	// Type is the type of object to watch. e.g.
&v1.Pod{} + Type client.Object + + // cache used to watch APIs + cache cache.Cache + + // started may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +var _ SyncingSource = &Kind{} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Type should have been specified by the user. + if ks.Type == nil { + return fmt.Errorf("must specify Kind.Type") + } + + // cache should have been injected before Start was called + if ks.cache == nil { + return fmt.Errorf("must call CacheInto on Kind before calling Start") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. + } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + if !ks.cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return errors.New("timed out waiting for cache to be synced") + } +} + +var _ inject.Cache = &Kind{} + +// InjectCache is internal should be called only by the Controller. InjectCache is used to inject +// the Cache dependency initialized by the ControllerManager. +func (ks *Kind) InjectCache(c cache.Cache) error { + if ks.cache == nil { + ks.cache = c + } + return nil +} + +var _ Source = &Channel{} + +// Channel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). 
Channel requires the user to wire the external
+// source (e.g. an HTTP handler) to write GenericEvents to the underlying channel.
+type Channel struct {
+	// once ensures the event distribution goroutine will be started only once
+	once sync.Once
+
+	// Source is the source channel to fetch GenericEvents
+	Source <-chan event.GenericEvent
+
+	// stop is used to end the ongoing goroutine and close the channels
+	stop <-chan struct{}
+
+	// dest holds the destination channels of the added event handlers
+	dest []chan event.GenericEvent
+
+	// DestBufferSize is the specified buffer size of dest channels.
+	// Defaults to 1024 if not specified.
+	DestBufferSize int
+
+	// destLock ensures that destination channels are added and removed safely
+	destLock sync.Mutex
+}
+
+func (cs *Channel) String() string {
+	return fmt.Sprintf("channel source: %p", cs)
+}
+
+var _ inject.Stoppable = &Channel{}
+
+// InjectStopChannel is internal and should be called only by the Controller.
+// It is used to inject the stop channel initialized by the ControllerManager.
+func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error {
+	if cs.stop == nil {
+		cs.stop = stop
+	}
+
+	return nil
+}
+
+// Start implements Source and should only be called by the Controller.
+func (cs *Channel) Start(
+	ctx context.Context,
+	handler handler.EventHandler,
+	queue workqueue.RateLimitingInterface,
+	prct ...predicate.Predicate) error {
+	// Source should have been specified by the user.
+	if cs.Source == nil {
+		return fmt.Errorf("must specify Channel.Source")
+	}
+
+	// stop should have been injected before Start was called
+	if cs.stop == nil {
+		return fmt.Errorf("must call InjectStop on Channel before calling Start")
+	}
+
+	// use the default value if DestBufferSize is not specified
+	if cs.DestBufferSize == 0 {
+		cs.DestBufferSize = defaultBufferSize
+	}
+
+	dst := make(chan event.GenericEvent, cs.DestBufferSize)
+
+	cs.destLock.Lock()
+	cs.dest = append(cs.dest, dst)
+	cs.destLock.Unlock()
+
+	cs.once.Do(func() {
+		// Distribute GenericEvents to all EventHandler / Queue pairs Watching this source
+		go cs.syncLoop(ctx)
+	})
+
+	go func() {
+		for evt := range dst {
+			shouldHandle := true
+			for _, p := range prct {
+				if !p.Generic(evt) {
+					shouldHandle = false
+					break
+				}
+			}
+
+			if shouldHandle {
+				handler.Generic(evt, queue)
+			}
+		}
+	}()
+
+	return nil
+}
+
+func (cs *Channel) doStop() {
+	cs.destLock.Lock()
+	defer cs.destLock.Unlock()
+
+	for _, dst := range cs.dest {
+		close(dst)
+	}
+}
+
+func (cs *Channel) distribute(evt event.GenericEvent) {
+	cs.destLock.Lock()
+	defer cs.destLock.Unlock()
+
+	for _, dst := range cs.dest {
+		// We cannot send in a separate goroutine here, or we would risk a
+		// race and write to channels that have already been closed.
+		// To avoid blocking, the dest channels are expected to have a
+		// sufficiently large buffer. If a send still blocks, the controller
+		// is assumed to be in an abnormal state.
+		dst <- evt
+	}
+}
+
+func (cs *Channel) syncLoop(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			// Close destination channels
+			cs.doStop()
+			return
+		case evt, stillOpen := <-cs.Source:
+			if !stillOpen {
+				// if the source channel is closed, we will never receive
+				// anything more on it, so stop and bail out
+				cs.doStop()
+				return
+			}
+			cs.distribute(evt)
+		}
+	}
+}
+
+// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
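For orientation, a minimal sketch (not part of this patch) of how a Kind source and a Channel source are typically handed to a controller. The package name, the setupWatches helper, and the mgr, r, and events arguments are illustrative and assumed to come from the caller's setup code.

package example // illustrative sketch only

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// setupWatches wires one in-cluster source (Kind) and one external source
// (Channel) into a single controller.
func setupWatches(mgr manager.Manager, r reconcile.Reconciler, events chan event.GenericEvent) error {
	c, err := controller.New("example-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Kind: Pod events served from the manager's cache; the cache is injected
	// into the source when Watch is called.
	if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}); err != nil {
		return err
	}

	// Channel: every GenericEvent written to events by external code (for
	// example an HTTP handler) is enqueued for reconciliation.
	return c.Watch(&source.Channel{Source: events}, &handler.EnqueueRequestForObject{})
}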
+type Informer struct { + // Informer is the controller-runtime Informer + Informer cache.Informer +} + +var _ Source = &Informer{} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Informer should have been specified by the user. + if is.Informer == nil { + return fmt.Errorf("must specify Informer.Informer") + } + + is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + return nil +} + +func (is *Informer) String() string { + return fmt.Sprintf("informer source: %p", is.Informer) +} + +var _ Source = Func(nil) + +// Func is a function that implements Source. +type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error + +// Start implements Source. +func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, + pr ...predicate.Predicate) error { + return f(ctx, evt, queue, pr...) +} + +func (f Func) String() string { + return fmt.Sprintf("func source: %p", f) +} diff --git a/pkg/source/source_integration_test.go b/pkg/source/source_integration_test.go new file mode 100644 index 0000000000..f05a154d14 --- /dev/null +++ b/pkg/source/source_integration_test.go @@ -0,0 +1,350 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source_test + +import ( + "fmt" + "time" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + toolscache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +var _ = Describe("Source", func() { + var instance1, instance2 *source.Kind + var obj client.Object + var q workqueue.RateLimitingInterface + var c1, c2 chan interface{} + var ns string + count := 0 + + BeforeEach(func() { + // Create the namespace for the test + ns = fmt.Sprintf("controller-source-kindsource-%v", count) + count++ + _, err := clientset.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: ns}, + }, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + q = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + c1 = make(chan interface{}) + c2 = make(chan interface{}) + }) + + JustBeforeEach(func() { + instance1 = &source.Kind{Type: obj} + Expect(inject.CacheInto(icache, instance1)).To(BeTrue()) + + instance2 = &source.Kind{Type: obj} + Expect(inject.CacheInto(icache, instance2)).To(BeTrue()) + }) + + AfterEach(func() { + err := clientset.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + close(c1) + close(c2) + }) + + Describe("Kind", func() { + Context("for a Deployment resource", func() { + obj = &appsv1.Deployment{} + + It("should provide Deployment Events", func() { + var created, updated, deleted *appsv1.Deployment + var err error + + // Get the client and Deployment used to create events + client := clientset.AppsV1().Deployments(ns) + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-name"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + + // Create an event handler to verify the events + newHandler := func(c chan interface{}) handler.Funcs { + return handler.Funcs{ + CreateFunc: func(evt event.CreateEvent, rli workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(rli).To(Equal(q)) + c <- evt + }, + UpdateFunc: func(evt event.UpdateEvent, rli workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(rli).To(Equal(q)) + c <- evt + }, + DeleteFunc: func(evt event.DeleteEvent, rli workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(rli).To(Equal(q)) + c <- evt + }, + } + } + handler1 := newHandler(c1) + handler2 := newHandler(c2) + + // Create 2 instances + Expect(instance1.Start(ctx, handler1, q)).To(Succeed()) + Expect(instance2.Start(ctx, handler2, q)).To(Succeed()) + + By("Creating a Deployment and expecting the CreateEvent.") + created, err = client.Create(ctx, deployment, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(created).NotTo(BeNil()) + + // Check first CreateEvent + evt := <-c1 + createEvt, ok := evt.(event.CreateEvent) + Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.CreateEvent{})) + Expect(createEvt.Object).To(Equal(created)) + + // Check second CreateEvent + evt = <-c2 + createEvt, ok = evt.(event.CreateEvent) + Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.CreateEvent{})) + Expect(createEvt.Object).To(Equal(created)) + + 
By("Updating a Deployment and expecting the UpdateEvent.") + updated = created.DeepCopy() + updated.Labels = map[string]string{"biz": "buz"} + updated, err = client.Update(ctx, updated, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // Check first UpdateEvent + evt = <-c1 + updateEvt, ok := evt.(event.UpdateEvent) + Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.UpdateEvent{})) + + Expect(updateEvt.ObjectNew).To(Equal(updated)) + + Expect(updateEvt.ObjectOld).To(Equal(created)) + + // Check second UpdateEvent + evt = <-c2 + updateEvt, ok = evt.(event.UpdateEvent) + Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.UpdateEvent{})) + + Expect(updateEvt.ObjectNew).To(Equal(updated)) + + Expect(updateEvt.ObjectOld).To(Equal(created)) + + By("Deleting a Deployment and expecting the Delete.") + deleted = updated.DeepCopy() + err = client.Delete(ctx, created.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + deleted.SetResourceVersion("") + evt = <-c1 + deleteEvt, ok := evt.(event.DeleteEvent) + Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.DeleteEvent{})) + deleteEvt.Object.SetResourceVersion("") + Expect(deleteEvt.Object).To(Equal(deleted)) + + evt = <-c2 + deleteEvt, ok = evt.(event.DeleteEvent) + Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.DeleteEvent{})) + deleteEvt.Object.SetResourceVersion("") + Expect(deleteEvt.Object).To(Equal(deleted)) + }, 5) + }) + + // TODO(pwittrock): Write this test + PContext("for a Foo CRD resource", func() { + It("should provide Foo Events", func() { + + }) + }) + }) + + Describe("Informer", func() { + var c chan struct{} + var rs *appsv1.ReplicaSet + var depInformer toolscache.SharedIndexInformer + var informerFactory kubeinformers.SharedInformerFactory + var stopTest chan struct{} + + BeforeEach(func() { + stopTest = make(chan struct{}) + informerFactory = kubeinformers.NewSharedInformerFactory(clientset, time.Second*30) + depInformer = informerFactory.Apps().V1().ReplicaSets().Informer() + informerFactory.Start(stopTest) + Eventually(depInformer.HasSynced).Should(BeTrue()) + + c = make(chan struct{}) + rs = &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{Name: "informer-rs-name"}, + Spec: appsv1.ReplicaSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + }) + + AfterEach(func() { + close(stopTest) + }) + + Context("for a ReplicaSet resource", func() { + It("should provide a ReplicaSet CreateEvent", func() { + c := make(chan struct{}) + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Informer{Informer: depInformer} + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + var err error + rs, err := clientset.AppsV1().ReplicaSets("default").Get(ctx, rs.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(rs)) + close(c) + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) 
{ + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + _, err = clientset.AppsV1().ReplicaSets("default").Create(ctx, rs, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + <-c + }, 30) + + It("should provide a ReplicaSet UpdateEvent", func() { + var err error + rs, err = clientset.AppsV1().ReplicaSets("default").Get(ctx, rs.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + rs2 := rs.DeepCopy() + rs2.SetLabels(map[string]string{"biz": "baz"}) + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Informer{Informer: depInformer} + err = instance.Start(ctx, handler.Funcs{ + CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { + }, + UpdateFunc: func(evt event.UpdateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + var err error + rs2, err := clientset.AppsV1().ReplicaSets("default").Get(ctx, rs.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(q2).To(Equal(q)) + Expect(evt.ObjectOld).To(Equal(rs)) + + Expect(evt.ObjectNew).To(Equal(rs2)) + + close(c) + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + _, err = clientset.AppsV1().ReplicaSets("default").Update(ctx, rs2, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + <-c + }) + + It("should provide a ReplicaSet DeletedEvent", func() { + c := make(chan struct{}) + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Informer{Informer: depInformer} + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + }, + DeleteFunc: func(evt event.DeleteEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt.Object.GetName()).To(Equal(rs.Name)) + close(c) + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + err = clientset.AppsV1().ReplicaSets("default").Delete(ctx, rs.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + <-c + }) + }) + }) +}) diff --git a/pkg/source/source_suite_test.go b/pkg/source/source_suite_test.go new file mode 100644 index 0000000000..9fd9671cd0 --- /dev/null +++ b/pkg/source/source_suite_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source_test + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Source Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var config *rest.Config +var clientset *kubernetes.Clientset +var icache cache.Cache +var ctx context.Context +var cancel context.CancelFunc + +var _ = BeforeSuite(func() { + ctx, cancel = context.WithCancel(context.Background()) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + config, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + clientset, err = kubernetes.NewForConfig(config) + Expect(err).NotTo(HaveOccurred()) + + icache, err = cache.New(config, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(icache.Start(ctx)).NotTo(HaveOccurred()) + }() +}, 60) + +var _ = AfterSuite(func() { + cancel() + Expect(testenv.Stop()).To(Succeed()) +}, 5) diff --git a/pkg/source/source_test.go b/pkg/source/source_test.go new file mode 100644 index 0000000000..70c708df08 --- /dev/null +++ b/pkg/source/source_test.go @@ -0,0 +1,568 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package source_test + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/workqueue" +) + +var _ = Describe("Source", func() { + Describe("Kind", func() { + var c chan struct{} + var p *corev1.Pod + var ic *informertest.FakeInformers + + BeforeEach(func() { + ic = &informertest.FakeInformers{} + c = make(chan struct{}) + p = &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test", Image: "test"}, + }, + }, + } + }) + + Context("for a Pod resource", func() { + It("should provide a Pod CreateEvent", func() { + c := make(chan struct{}) + p := &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "test", Image: "test"}, + }, + }, + } + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Kind{ + Type: &corev1.Pod{}, + } + Expect(inject.CacheInto(ic, instance)).To(BeTrue()) + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt.Object).To(Equal(p)) + close(c) + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) + + i, err := ic.FakeInformerFor(&corev1.Pod{}) + Expect(err).NotTo(HaveOccurred()) + + i.Add(p) + <-c + }) + + It("should provide a Pod UpdateEvent", func() { + p2 := p.DeepCopy() + p2.SetLabels(map[string]string{"biz": "baz"}) + + ic := &informertest.FakeInformers{} + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Kind{ + Type: &corev1.Pod{}, + } + Expect(instance.InjectCache(ic)).To(Succeed()) + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(evt event.UpdateEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.ObjectOld).To(Equal(p)) + + Expect(evt.ObjectNew).To(Equal(p2)) + + close(c) + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) + + i, err := ic.FakeInformerFor(&corev1.Pod{}) + Expect(err).NotTo(HaveOccurred()) + + i.Update(p, p2) + <-c + }) + + It("should provide a Pod DeletedEvent", func() { + c := make(chan struct{}) + p := &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ 
+ {Name: "test", Image: "test"}, + }, + }, + } + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Kind{ + Type: &corev1.Pod{}, + } + Expect(inject.CacheInto(ic, instance)).To(BeTrue()) + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(evt event.DeleteEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + close(c) + }, + GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) + + i, err := ic.FakeInformerFor(&corev1.Pod{}) + Expect(err).NotTo(HaveOccurred()) + + i.Delete(p) + <-c + }) + }) + + It("should return an error from Start if informers were not injected", func() { + instance := source.Kind{Type: &corev1.Pod{}} + err := instance.Start(ctx, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must call CacheInto on Kind before calling Start")) + }) + + It("should return an error from Start if a type was not provided", func() { + instance := source.Kind{} + Expect(instance.InjectCache(&informertest.FakeInformers{})).To(Succeed()) + err := instance.Start(ctx, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must specify Kind.Type")) + }) + + It("should return an error if syncing fails", func() { + instance := source.Kind{Type: &corev1.Pod{}} + f := false + Expect(instance.InjectCache(&informertest.FakeInformers{Synced: &f})).To(Succeed()) + Expect(instance.Start(context.Background(), nil, nil)).NotTo(HaveOccurred()) + err := instance.WaitForSync(context.Background()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("cache did not sync")) + + }) + + Context("for a Kind not in the cache", func() { + It("should return an error when WaitForSync is called", func() { + ic.Error = fmt.Errorf("test error") + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + + instance := &source.Kind{ + Type: &corev1.Pod{}, + } + Expect(instance.InjectCache(ic)).To(Succeed()) + err := instance.Start(ctx, handler.Funcs{}, q) + Expect(err).NotTo(HaveOccurred()) + Eventually(instance.WaitForSync(context.Background())).Should(HaveOccurred()) + }) + }) + }) + + Describe("KindWithCache", func() { + It("should not allow injecting a cache", func() { + instance := source.NewKindWithCache(nil, nil) + injected, err := inject.CacheInto(&informertest.FakeInformers{}, instance) + Expect(err).To(BeNil()) + Expect(injected).To(BeFalse()) + }) + + It("should return an error if syncing fails", func() { + f := false + instance := source.NewKindWithCache(&corev1.Pod{}, &informertest.FakeInformers{Synced: &f}) + Expect(instance.Start(context.Background(), nil, nil)).NotTo(HaveOccurred()) + err := instance.WaitForSync(context.Background()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("cache did not sync")) + + }) + }) + + Describe("Func", func() { + It("should be called from Start", func() { + 
run := false + instance := source.Func(func( + context.Context, + handler.EventHandler, + workqueue.RateLimitingInterface, ...predicate.Predicate) error { + run = true + return nil + }) + Expect(instance.Start(ctx, nil, nil)).NotTo(HaveOccurred()) + Expect(run).To(BeTrue()) + + expected := fmt.Errorf("expected error: Func") + instance = source.Func(func( + context.Context, + handler.EventHandler, + workqueue.RateLimitingInterface, ...predicate.Predicate) error { + return expected + }) + Expect(instance.Start(ctx, nil, nil)).To(Equal(expected)) + }) + }) + + Describe("Channel", func() { + var ctx context.Context + var cancel context.CancelFunc + var ch chan event.GenericEvent + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + ch = make(chan event.GenericEvent) + }) + + AfterEach(func() { + cancel() + close(ch) + }) + + Context("for a source", func() { + It("should provide a GenericEvent", func() { + ch := make(chan event.GenericEvent) + c := make(chan struct{}) + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, + } + evt := event.GenericEvent{ + Object: p, + } + // Event that should be filtered out by predicates + invalidEvt := event.GenericEvent{} + + // Predicate to filter out empty event + prct := predicate.Funcs{ + GenericFunc: func(e event.GenericEvent) bool { + return e.Object != nil + }, + } + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Channel{Source: ch} + Expect(inject.StopChannelInto(ctx.Done(), instance)).To(BeTrue()) + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + // The empty event should have been filtered out by the predicates, + // and will not be passed to the handler. 
+ Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + close(c) + }, + }, q, prct) + Expect(err).NotTo(HaveOccurred()) + + ch <- invalidEvt + ch <- evt + <-c + }) + It("should get pending events processed once channel unblocked", func() { + ch := make(chan event.GenericEvent) + unblock := make(chan struct{}) + processed := make(chan struct{}) + evt := event.GenericEvent{} + eventCount := 0 + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + // Add a handler to get distribution blocked + instance := &source.Channel{Source: ch} + instance.DestBufferSize = 1 + Expect(inject.StopChannelInto(ctx.Done(), instance)).To(BeTrue()) + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + // Block for the first time + if eventCount == 0 { + <-unblock + } + eventCount++ + + if eventCount == 3 { + close(processed) + } + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + // Write 3 events into the source channel. + // The 1st should be passed into the generic func of the handler; + // The 2nd should be fetched out of the source channel, and waiting to write into dest channel; + // The 3rd should be pending in the source channel. + ch <- evt + ch <- evt + ch <- evt + + // Validate none of the events have been processed. + Expect(eventCount).To(Equal(0)) + + close(unblock) + + <-processed + + // Validate all of the events have been processed. 
+ Expect(eventCount).To(Equal(3)) + }) + It("should be able to cope with events in the channel before the source is started", func() { + ch := make(chan event.GenericEvent, 1) + processed := make(chan struct{}) + evt := event.GenericEvent{} + ch <- evt + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + // Add a handler to get distribution blocked + instance := &source.Channel{Source: ch} + instance.DestBufferSize = 1 + Expect(inject.StopChannelInto(ctx.Done(), instance)).To(BeTrue()) + + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + + close(processed) + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + <-processed + }) + It("should stop when the source channel is closed", func() { + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + // if we didn't stop, we'd start spamming the queue with empty + // messages as we "received" a zero-valued GenericEvent from + // the source channel + + By("creating a channel with one element, then closing it") + ch := make(chan event.GenericEvent, 1) + evt := event.GenericEvent{} + ch <- evt + close(ch) + + By("feeding that channel to a channel source") + src := &source.Channel{Source: ch} + Expect(inject.StopChannelInto(ctx.Done(), src)).To(BeTrue()) + + processed := make(chan struct{}) + defer close(processed) + + err := src.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + + processed <- struct{}{} + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + By("expecting to only get one event") + Eventually(processed).Should(Receive()) + Consistently(processed).ShouldNot(Receive()) + }) + It("should get error if no source specified", func() { + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Channel{ /*no source specified*/ } + Expect(inject.StopChannelInto(ctx.Done(), instance)).To(BeTrue()) + err := instance.Start(ctx, handler.Funcs{}, q) + Expect(err).To(Equal(fmt.Errorf("must specify Channel.Source"))) + }) + It("should get error if no stop channel injected", func() { + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Channel{Source: ch} + err := instance.Start(ctx, handler.Funcs{}, q) + Expect(err).To(Equal(fmt.Errorf("must call InjectStop on Channel before calling Start"))) + }) + }) + Context("for multi sources (handlers)", func() { + It("should provide GenericEvents for all handlers", func() { + ch := make(chan event.GenericEvent) + p := &corev1.Pod{ + 
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, + } + evt := event.GenericEvent{ + Object: p, + } + + var resEvent1, resEvent2 event.GenericEvent + c1 := make(chan struct{}) + c2 := make(chan struct{}) + + q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + instance := &source.Channel{Source: ch} + Expect(inject.StopChannelInto(ctx.Done(), instance)).To(BeTrue()) + err := instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + resEvent1 = evt + close(c1) + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + err = instance.Start(ctx, handler.Funcs{ + CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { + defer GinkgoRecover() + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + resEvent2 = evt + close(c2) + }, + }, q) + Expect(err).NotTo(HaveOccurred()) + + ch <- evt + <-c1 + <-c2 + + // Validate the two handlers received same event + Expect(resEvent1).To(Equal(resEvent2)) + }) + }) + }) +}) diff --git a/pkg/webhook/admission/admission_suite_test.go b/pkg/webhook/admission/admission_suite_test.go new file mode 100644 index 0000000000..3648aa45b7 --- /dev/null +++ b/pkg/webhook/admission/admission_suite_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestAdmissionWebhook(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Admission Webhook Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}, 60) diff --git a/pkg/webhook/admission/admissiontest/doc.go b/pkg/webhook/admission/admissiontest/doc.go new file mode 100644 index 0000000000..b4a7a42191 --- /dev/null +++ b/pkg/webhook/admission/admissiontest/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package admissiontest contains fake webhooks for validating admission webhooks +package admissiontest diff --git a/pkg/webhook/admission/admissiontest/util.go b/pkg/webhook/admission/admissiontest/util.go new file mode 100644 index 0000000000..685e8274d8 --- /dev/null +++ b/pkg/webhook/admission/admissiontest/util.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admissiontest + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// FakeValidator provides fake validating webhook functionality for testing +// It implements the admission.Validator interface and +// rejects all requests with the same configured error +// or passes if ErrorToReturn is nil. +type FakeValidator struct { + // ErrorToReturn is the error for which the FakeValidator rejects all requests + ErrorToReturn error `json:"ErrorToReturn,omitempty"` + // GVKToReturn is the GroupVersionKind that the webhook operates on + GVKToReturn schema.GroupVersionKind +} + +// ValidateCreate implements admission.Validator. +func (v *FakeValidator) ValidateCreate() error { + return v.ErrorToReturn +} + +// ValidateUpdate implements admission.Validator. +func (v *FakeValidator) ValidateUpdate(old runtime.Object) error { + return v.ErrorToReturn +} + +// ValidateDelete implements admission.Validator. +func (v *FakeValidator) ValidateDelete() error { + return v.ErrorToReturn +} + +// GetObjectKind implements admission.Validator. +func (v *FakeValidator) GetObjectKind() schema.ObjectKind { return v } + +// DeepCopyObject implements admission.Validator. 
+func (v *FakeValidator) DeepCopyObject() runtime.Object { + return &FakeValidator{ErrorToReturn: v.ErrorToReturn, GVKToReturn: v.GVKToReturn} +} + +// GroupVersionKind implements admission.Validator. +func (v *FakeValidator) GroupVersionKind() schema.GroupVersionKind { + return v.GVKToReturn +} + +// SetGroupVersionKind implements admission.Validator. +func (v *FakeValidator) SetGroupVersionKind(gvk schema.GroupVersionKind) { + v.GVKToReturn = gvk +} diff --git a/pkg/webhook/admission/decode.go b/pkg/webhook/admission/decode.go new file mode 100644 index 0000000000..c7cb71b755 --- /dev/null +++ b/pkg/webhook/admission/decode.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/json" +) + +// Decoder knows how to decode the contents of an admission +// request into a concrete object. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme. +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. +// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. +// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. +func (d *Decoder) Decode(req Request, into runtime.Object) error { + // we error out if rawObj is an empty object. + if len(req.Object.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + return d.DecodeRaw(req.Object, into) +} + +// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. +// It errors out if rawObj is empty i.e. containing 0 raw bytes. +func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { + // NB(directxman12): there's a bug/weird interaction between decoders and + // the API server where the API server doesn't send a GVK on the embedded + // objects, which means the unstructured decoder refuses to decode. It + // also means we can't pass the unstructured directly in, since it'll try + // and call unstructured's special Unmarshal implementation, which calls + // back into that same decoder :-/ + // See kubernetes/kubernetes#74373. + + // we error out if rawObj is an empty object. 
+ if len(rawObj.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + // unmarshal into unstructured's underlying object to avoid calling the decoder + return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + } + + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, rawObj.Raw, into) +} diff --git a/pkg/webhook/admission/decode_test.go b/pkg/webhook/admission/decode_test.go new file mode 100644 index 0000000000..c167c51026 --- /dev/null +++ b/pkg/webhook/admission/decode_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" +) + +var _ = Describe("Admission Webhook Decoder", func() { + var decoder *Decoder + BeforeEach(func() { + By("creating a new decoder for a scheme") + var err error + decoder, err = NewDecoder(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + Expect(decoder).NotTo(BeNil()) + }) + + req := Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Object: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "foo", + "namespace": "default" + }, + "spec": { + "containers": [ + { + "image": "bar:v2", + "name": "bar" + } + ] + } +}`), + }, + OldObject: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "name": "foo", + "namespace": "default" + }, + "spec": { + "containers": [ + { + "image": "bar:v1", + "name": "bar" + } + ] + } +}`), + }, + }, + } + + It("should decode a valid admission request", func() { + By("extracting the object from the request") + var actualObj corev1.Pod + Expect(decoder.Decode(req, &actualObj)).To(Succeed()) + + By("verifying that all data is present in the object") + Expect(actualObj).To(Equal(corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Image: "bar:v2", Name: "bar"}, + }, + }, + })) + }) + + It("should decode a valid RawExtension object", func() { + By("decoding the RawExtension object") + var actualObj corev1.Pod + Expect(decoder.DecodeRaw(req.OldObject, &actualObj)).To(Succeed()) + + By("verifying that all data is present in the object") + Expect(actualObj).To(Equal(corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Image: "bar:v1", Name: "bar"}, + }, + }, + })) + }) + + It("should 
fail to decode if the object in the request doesn't match the passed-in type", func() { + By("trying to extract a pod from the quest into a node") + Expect(decoder.Decode(req, &corev1.Node{})).NotTo(Succeed()) + + By("trying to extract a pod in RawExtension format into a node") + Expect(decoder.DecodeRaw(req.OldObject, &corev1.Node{})).NotTo(Succeed()) + }) + + It("should be able to decode into an unstructured object", func() { + By("decoding the request into an unstructured object") + var target unstructured.Unstructured + Expect(decoder.Decode(req, &target)).To(Succeed()) + + By("sanity-checking the metadata on the output object") + Expect(target.Object["metadata"]).To(Equal(map[string]interface{}{ + "name": "foo", + "namespace": "default", + })) + + By("decoding the RawExtension object into an unstructured object") + var target2 unstructured.Unstructured + Expect(decoder.DecodeRaw(req.Object, &target2)).To(Succeed()) + + By("sanity-checking the metadata on the output object") + Expect(target2.Object["metadata"]).To(Equal(map[string]interface{}{ + "name": "foo", + "namespace": "default", + })) + }) +}) diff --git a/pkg/webhook/admission/defaulter.go b/pkg/webhook/admission/defaulter.go new file mode 100644 index 0000000000..e4e0778f57 --- /dev/null +++ b/pkg/webhook/admission/defaulter.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "net/http" + + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Defaulter defines functions for setting defaults on resources. +type Defaulter interface { + runtime.Object + Default() +} + +// DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. +func DefaultingWebhookFor(defaulter Defaulter) *Webhook { + return &Webhook{ + Handler: &mutatingHandler{defaulter: defaulter}, + } +} + +type mutatingHandler struct { + defaulter Defaulter + decoder *Decoder +} + +var _ DecoderInjector = &mutatingHandler{} + +// InjectDecoder injects the decoder into a mutatingHandler. +func (h *mutatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. 
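For context, a minimal sketch of a concrete Defaulter and how the resulting webhook is served. The ReplicaDefaulter type, its group/version/kind, the registration path, and the Register helper are illustrative, mirroring the TestDefaulter fixture in defaulter_test.go further down; real API types would usually get the runtime.Object plumbing from generated deepcopy code instead.

package example // illustrative sketch only

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// ReplicaDefaulter is a toy API object implementing admission.Defaulter.
type ReplicaDefaulter struct {
	Replicas int `json:"replicas,omitempty"`
}

var replicaDefaulterGVK = schema.GroupVersionKind{Group: "example.org", Version: "v1", Kind: "ReplicaDefaulter"}

func (d *ReplicaDefaulter) GetObjectKind() schema.ObjectKind            { return d }
func (d *ReplicaDefaulter) DeepCopyObject() runtime.Object              { c := *d; return &c }
func (d *ReplicaDefaulter) GroupVersionKind() schema.GroupVersionKind   { return replicaDefaulterGVK }
func (d *ReplicaDefaulter) SetGroupVersionKind(schema.GroupVersionKind) {}

// Default fills in a floor for Replicas; the mutating handler turns whatever
// fields changed into the JSON patch returned to the API server.
func (d *ReplicaDefaulter) Default() {
	if d.Replicas < 1 {
		d.Replicas = 1
	}
}

// Register wires the defaulting webhook into the manager's webhook server at a
// hypothetical path; the server injects the decoder before serving.
func Register(mgr manager.Manager) {
	mgr.GetWebhookServer().Register("/mutate-example-org-v1-replicadefaulter",
		admission.DefaultingWebhookFor(&ReplicaDefaulter{}))
}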
+func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + + // always skip when a DELETE operation received in mutation handler + // describe in https://github.com/kubernetes-sigs/controller-runtime/issues/1762 + if req.Operation == admissionv1.Delete { + return Response{AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }} + } + + // Get the object in the request + obj := h.defaulter.DeepCopyObject().(Defaulter) + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + obj.Default() + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + + // Create the patch + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/pkg/webhook/admission/defaulter_custom.go b/pkg/webhook/admission/defaulter_custom.go new file mode 100644 index 0000000000..d65727e62c --- /dev/null +++ b/pkg/webhook/admission/defaulter_custom.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter interface { + Default(ctx context.Context, obj runtime.Object) error +} + +// WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. +func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { + return &Webhook{ + Handler: &defaulterForType{object: obj, defaulter: defaulter}, + } +} + +type defaulterForType struct { + defaulter CustomDefaulter + object runtime.Object + decoder *Decoder +} + +var _ DecoderInjector = &defaulterForType{} + +func (h *defaulterForType) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. 
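For comparison with the Defaulter flow above, a minimal sketch of a CustomDefaulter, which keeps the defaulting logic outside the API type and so also works for types you do not own. The podDefaulter type, label key, registration path, and RegisterPodDefaulter helper are illustrative.

package example // illustrative sketch only

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podDefaulter implements admission.CustomDefaulter for corev1.Pod.
type podDefaulter struct{}

func (podDefaulter) Default(ctx context.Context, obj runtime.Object) error {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("expected a Pod, got %T", obj)
	}
	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}
	// Stamp a marker label if it is missing; the handler turns this change
	// into the JSON patch returned to the API server.
	if _, ok := pod.Labels["example.org/defaulted"]; !ok {
		pod.Labels["example.org/defaulted"] = "true"
	}
	return nil
}

// RegisterPodDefaulter registers the custom defaulter at a hypothetical path.
func RegisterPodDefaulter(mgr manager.Manager) {
	mgr.GetWebhookServer().Register("/mutate-v1-pod",
		admission.WithCustomDefaulter(&corev1.Pod{}, podDefaulter{}))
}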
+func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + if err := h.defaulter.Default(ctx, obj); err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Create the patch + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/pkg/webhook/admission/defaulter_test.go b/pkg/webhook/admission/defaulter_test.go new file mode 100644 index 0000000000..93c3eda7c2 --- /dev/null +++ b/pkg/webhook/admission/defaulter_test.go @@ -0,0 +1,68 @@ +package admission + +import ( + "context" + "net/http" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ = Describe("Defaulter Handler", func() { + + It("should return ok if received delete verb in defaulter handler", func() { + obj := &TestDefaulter{} + handler := DefaultingWebhookFor(obj) + + resp := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + }, + }, + }) + Expect(resp.Allowed).Should(BeTrue()) + Expect(resp.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + +}) + +// TestDefaulter. +var _ runtime.Object = &TestDefaulter{} + +type TestDefaulter struct { + Replica int `json:"replica,omitempty"` +} + +var testDefaulterGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestDefaulter"} + +func (d *TestDefaulter) GetObjectKind() schema.ObjectKind { return d } +func (d *TestDefaulter) DeepCopyObject() runtime.Object { + return &TestDefaulter{ + Replica: d.Replica, + } +} + +func (d *TestDefaulter) GroupVersionKind() schema.GroupVersionKind { + return testDefaulterGVK +} + +func (d *TestDefaulter) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestDefaulterList{} + +type TestDefaulterList struct{} + +func (*TestDefaulterList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestDefaulterList) DeepCopyObject() runtime.Object { return nil } + +func (d *TestDefaulter) Default() { + if d.Replica < 2 { + d.Replica = 2 + } +} diff --git a/pkg/webhook/admission/doc.go b/pkg/webhook/admission/doc.go new file mode 100644 index 0000000000..0b274dd02b --- /dev/null +++ b/pkg/webhook/admission/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package admission provides implementation for admission webhook and methods to implement admission webhook handlers. + +See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. +*/ +package admission + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("admission") diff --git a/pkg/webhook/admission/http.go b/pkg/webhook/admission/http.go new file mode 100644 index 0000000000..066cc42256 --- /dev/null +++ b/pkg/webhook/admission/http.go @@ -0,0 +1,153 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + v1 "k8s.io/api/admission/v1" + "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var admissionScheme = runtime.NewScheme() +var admissionCodecs = serializer.NewCodecFactory(admissionScheme) + +func init() { + utilruntime.Must(v1.AddToScheme(admissionScheme)) + utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) +} + +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var body []byte + var err error + ctx := r.Context() + if wh.WithContextFunc != nil { + ctx = wh.WithContextFunc(ctx, r) + } + + var reviewResponse Response + if r.Body == nil { + err = errors.New("request body is empty") + wh.log.Error(err, "bad request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + defer r.Body.Close() + if body, err = io.ReadAll(r.Body); err != nil { + wh.log.Error(err, "unable to read the body from the incoming request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + // verify the content type is accurate + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { + err = fmt.Errorf("contentType=%s, expected application/json", contentType) + wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + // Both v1 and v1beta1 AdmissionReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 AdmissionReview to v1. + // The actual AdmissionReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. 
+ req := Request{} + ar := unversionedAdmissionReview{} + // avoid an extra copy + ar.Request = &req.AdmissionRequest + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { + wh.log.Error(err, "unable to decode the request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + + reviewResponse = wh.Handle(ctx, req) + wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) +} + +// writeResponse writes response to w generically, i.e. without encoding GVK information. +func (wh *Webhook) writeResponse(w io.Writer, response Response) { + wh.writeAdmissionResponse(w, v1.AdmissionReview{Response: &response.AdmissionResponse}) +} + +// writeResponseTyped writes response to w with GVK set to admRevGVK, which is necessary +// if multiple AdmissionReview versions are permitted by the webhook. +func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK *schema.GroupVersionKind) { + ar := v1.AdmissionReview{ + Response: &response.AdmissionResponse, + } + // Default to a v1 AdmissionReview, otherwise the API server may not recognize the request + // if multiple AdmissionReview versions are permitted by the webhook config. + // TODO(estroz): this should be configurable since older API servers won't know about v1. + if admRevGVK == nil || *admRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + } else { + ar.SetGroupVersionKind(*admRevGVK) + } + wh.writeAdmissionResponse(w, ar) +} + +// writeAdmissionResponse writes ar to w. +func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { + if err := json.NewEncoder(w).Encode(ar); err != nil { + wh.log.Error(err, "unable to encode and write the response") + // Since the `ar v1.AdmissionReview` is a clear and legal object, + // it should not have problem to be marshalled into bytes. + // The error here is probably caused by the abnormal HTTP connection, + // e.g., broken pipe, so we can only write the error response once, + // to avoid endless circular calling. + serverError := Errored(http.StatusInternalServerError, err) + if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { + wh.log.Error(err, "still unable to encode and write the InternalServerError response") + } + } else { + res := ar.Response + if log := wh.log; log.V(1).Enabled() { + if res.Result != nil { + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + } + log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + } + } +} + +// unversionedAdmissionReview is used to decode both v1 and v1beta1 AdmissionReview types. +type unversionedAdmissionReview struct { + v1.AdmissionReview +} + +var _ runtime.Object = &unversionedAdmissionReview{} diff --git a/pkg/webhook/admission/http_test.go b/pkg/webhook/admission/http_test.go new file mode 100644 index 0000000000..af8ff31ee2 --- /dev/null +++ b/pkg/webhook/admission/http_test.go @@ -0,0 +1,254 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ = Describe("Admission Webhooks", func() { + + const ( + gvkJSONv1 = `"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1"` + gvkJSONv1beta1 = `"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1beta1"` + ) + + Describe("HTTP Handler", func() { + var respRecorder *httptest.ResponseRecorder + webhook := &Webhook{ + Handler: nil, + } + BeforeEach(func() { + respRecorder = &httptest.ResponseRecorder{ + Body: bytes.NewBuffer(nil), + } + _, err := inject.LoggerInto(log.WithName("test-webhook"), webhook) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return bad-request when given an empty body", func() { + req := &http.Request{Body: nil} + + expected := `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request body is empty","code":400}}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given the wrong content-type", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/foo"}}, + Body: nopCloser{Reader: bytes.NewBuffer(nil)}, + } + + expected := + `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"contentType=application/foo, expected application/json","code":400}}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given an undecodable body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString("{")}, + } + + expected := + `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"couldn't get version/kind; json parse error: unexpected end of JSON input","code":400}}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the response given by the handler with version defaulted to v1", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(`{"request":{}}`)}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} +`, gvkJSONv1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the v1 response given by the handler", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"request":{}}`, gvkJSONv1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + log: 
logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} +`, gvkJSONv1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the v1beta1 response given by the handler", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"request":{}}`, gvkJSONv1beta1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} +`, gvkJSONv1beta1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should present the Context from the HTTP request, if any", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(`{"request":{}}`)}, + } + type ctxkey int + const key ctxkey = 1 + const value = "from-ctx" + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + <-ctx.Done() + return Allowed(ctx.Value(key).(string)) + }, + }, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"reason":%q,"code":200}}} +`, gvkJSONv1, value) + + ctx, cancel := context.WithCancel(context.WithValue(context.Background(), key, value)) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should mutate the Context from the HTTP request, if func supplied", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(`{"request":{}}`)}, + } + type ctxkey int + const key ctxkey = 1 + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Allowed(ctx.Value(key).(string)) + }, + }, + WithContextFunc: func(ctx context.Context, r *http.Request) context.Context { + return context.WithValue(ctx, key, r.Header["Content-Type"][0]) + }, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"reason":%q,"code":200}}} +`, gvkJSONv1, "application/json") + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should never run into circular calling if the writer has broken", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"request":{}}`, gvkJSONv1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + log: logf.RuntimeLog.WithName("webhook"), + } + + bw := &brokenWriter{ResponseWriter: respRecorder} + Eventually(func() int { + // This should not be blocked by the circular calling of writeResponse and writeAdmissionResponse + webhook.ServeHTTP(bw, req) + return respRecorder.Body.Len() + }, time.Second*3).Should(Equal(0)) + }) + }) +}) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +type fakeHandler struct { + invoked bool + fn 
func(context.Context, Request) Response + decoder *Decoder + injectedString string +} + +func (h *fakeHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +func (h *fakeHandler) InjectString(s string) error { + h.injectedString = s + return nil +} + +func (h *fakeHandler) Handle(ctx context.Context, req Request) Response { + h.invoked = true + if h.fn != nil { + return h.fn(ctx, req) + } + return Response{AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + }} +} + +type brokenWriter struct { + http.ResponseWriter +} + +func (bw *brokenWriter) Write(buf []byte) (int, error) { + return 0, fmt.Errorf("mock: write: broken pipe") +} diff --git a/pkg/webhook/admission/inject.go b/pkg/webhook/admission/inject.go new file mode 100644 index 0000000000..d5af0d598f --- /dev/null +++ b/pkg/webhook/admission/inject.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. +type DecoderInjector interface { + InjectDecoder(*Decoder) error +} + +// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns +// false if i does not implement Decoder. +func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { + if s, ok := i.(DecoderInjector); ok { + return true, s.InjectDecoder(decoder) + } + return false, nil +} diff --git a/pkg/webhook/admission/multi.go b/pkg/webhook/admission/multi.go new file mode 100644 index 0000000000..26900cf2eb --- /dev/null +++ b/pkg/webhook/admission/multi.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
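InjectDecoderInto only has an effect on handlers that implement DecoderInjector, so a hand-written Handler that needs to decode request payloads usually keeps the decoder as a field. A small sketch under that assumption; the requiredLabelHandler name, the Pod target, and the label rule are illustrative.

    package handlers

    import (
        "context"
        "net/http"

        corev1 "k8s.io/api/core/v1"

        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    // requiredLabelHandler receives its decoder through InjectDecoder.
    type requiredLabelHandler struct {
        decoder *admission.Decoder
    }

    func (h *requiredLabelHandler) InjectDecoder(d *admission.Decoder) error {
        h.decoder = d
        return nil
    }

    func (h *requiredLabelHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
        pod := &corev1.Pod{}
        if err := h.decoder.Decode(req, pod); err != nil {
            return admission.Errored(http.StatusBadRequest, err)
        }
        if pod.Labels["example.com/owner"] == "" {
            return admission.Denied("missing required label example.com/owner")
        }
        return admission.Allowed("")
    }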
+*/ + +package admission + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type multiMutating []Handler + +func (hs multiMutating) Handle(ctx context.Context, req Request) Response { + patches := []jsonpatch.JsonPatchOperation{} + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + if resp.PatchType != nil && *resp.PatchType != admissionv1.PatchTypeJSONPatch { + return Errored(http.StatusInternalServerError, + fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", + resp.PatchType, admissionv1.PatchTypeJSONPatch)) + } + patches = append(patches, resp.Patches...) + } + var err error + marshaledPatch, err := json.Marshal(patches) + if err != nil { + return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %w", err)) + } + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Patch: marshaledPatch, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiMutating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// InjectDecoder injects the decoder into the handlers. +func (hs multiMutating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} + +// MultiMutatingHandler combines multiple mutating webhook handlers into a single +// mutating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. Users must take care to +// ensure patches are disjoint. +func MultiMutatingHandler(handlers ...Handler) Handler { + return multiMutating(handlers) +} + +type multiValidating []Handler + +func (hs multiValidating) Handle(ctx context.Context, req Request) Response { + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + } + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + } +} + +// MultiValidatingHandler combines multiple validating webhook handlers into a single +// validating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. +func MultiValidatingHandler(handlers ...Handler) Handler { + return multiValidating(handlers) +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiValidating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. 
+ for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// InjectDecoder injects the decoder into the handlers. +func (hs multiValidating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} diff --git a/pkg/webhook/admission/multi_test.go b/pkg/webhook/admission/multi_test.go new file mode 100644 index 0000000000..a8b51872a2 --- /dev/null +++ b/pkg/webhook/admission/multi_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" +) + +var _ = Describe("Multi-Handler Admission Webhooks", func() { + alwaysAllow := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + }, + } + }, + } + alwaysDeny := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + }, + } + }, + } + + Context("with validating handlers", func() { + It("should deny the request if any handler denies the request", func() { + By("setting up a handler with accept and deny") + handler := MultiValidatingHandler(alwaysAllow, alwaysDeny) + + By("checking that the handler denies the request") + resp := handler.Handle(context.Background(), Request{}) + Expect(resp.Allowed).To(BeFalse()) + }) + + It("should allow the request if all handlers allow the request", func() { + By("setting up a handler with only accept") + handler := MultiValidatingHandler(alwaysAllow, alwaysAllow) + + By("checking that the handler allows the request") + resp := handler.Handle(context.Background(), Request{}) + Expect(resp.Allowed).To(BeTrue()) + }) + }) + + Context("with mutating handlers", func() { + patcher1 := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + Patches: []jsonpatch.JsonPatchOperation{ + { + Operation: "add", + Path: "/metadata/annotation/new-key", + Value: "new-value", + }, + { + Operation: "replace", + Path: "/spec/replicas", + Value: "2", + }, + }, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } + }, + } + patcher2 := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + Patches: []jsonpatch.JsonPatchOperation{ + { + Operation: "add", + Path: "/metadata/annotation/hello", + Value: "world", + }, + }, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } + }, + } + + It("should not return any patches if the request is 
denied", func() { + By("setting up a webhook with some patches and a deny") + handler := MultiMutatingHandler(patcher1, patcher2, alwaysDeny) + + By("checking that the handler denies the request and produces no patches") + resp := handler.Handle(context.Background(), Request{}) + Expect(resp.Allowed).To(BeFalse()) + Expect(resp.Patches).To(BeEmpty()) + }) + + It("should produce all patches if the requests are all allowed", func() { + By("setting up a webhook with some patches") + handler := MultiMutatingHandler(patcher1, patcher2, alwaysAllow) + + By("checking that the handler accepts the request and returns all patches") + resp := handler.Handle(context.Background(), Request{}) + Expect(resp.Allowed).To(BeTrue()) + Expect(resp.Patch).To(Equal([]byte( + `[{"op":"add","path":"/metadata/annotation/new-key","value":"new-value"},` + + `{"op":"replace","path":"/spec/replicas","value":"2"},{"op":"add","path":"/metadata/annotation/hello","value":"world"}]`))) + }) + }) +}) diff --git a/pkg/webhook/admission/response.go b/pkg/webhook/admission/response.go new file mode 100644 index 0000000000..24ff1dee3c --- /dev/null +++ b/pkg/webhook/admission/response.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "net/http" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Allowed constructs a response indicating that the given operation +// is allowed (without any patches). +func Allowed(reason string) Response { + return ValidationResponse(true, reason) +} + +// Denied constructs a response indicating that the given operation +// is not allowed. +func Denied(reason string) Response { + return ValidationResponse(false, reason) +} + +// Patched constructs a response indicating that the given operation is +// allowed, and that the target object should be modified by the given +// JSONPatch operations. +func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(reason) + resp.Patches = patches + + return resp +} + +// Errored creates a new Response for error-handling a request. +func Errored(code int32, err error) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: code, + Message: err.Error(), + }, + }, + } +} + +// ValidationResponse returns a response for admitting a request. +func ValidationResponse(allowed bool, reason string) Response { + code := http.StatusForbidden + if allowed { + code = http.StatusOK + } + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &metav1.Status{ + Code: int32(code), + }, + }, + } + if len(reason) > 0 { + resp.Result.Reason = metav1.StatusReason(reason) + } + return resp +} + +// PatchResponseFromRaw takes 2 byte arrays and returns a new response with json patch. 
+// The original object should be passed in as raw bytes to avoid the roundtripping problem +// described in https://github.com/kubernetes-sigs/kubebuilder/issues/510. +func PatchResponseFromRaw(original, current []byte) Response { + patches, err := jsonpatch.CreatePatch(original, current) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + return Response{ + Patches: patches, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { + if len(patches) == 0 { + return nil + } + pt := admissionv1.PatchTypeJSONPatch + return &pt + }(), + }, + } +} + +// validationResponseFromStatus returns a response for admitting a request with provided Status object. +func validationResponseFromStatus(allowed bool, status metav1.Status) Response { + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &status, + }, + } + return resp +} + +// WithWarnings adds the given warnings to the Response. +// If any warnings were already given, they will not be overwritten. +func (r Response) WithWarnings(warnings ...string) Response { + r.AdmissionResponse.Warnings = append(r.AdmissionResponse.Warnings, warnings...) + return r +} diff --git a/pkg/webhook/admission/response_test.go b/pkg/webhook/admission/response_test.go new file mode 100644 index 0000000000..e96b0e6ca7 --- /dev/null +++ b/pkg/webhook/admission/response_test.go @@ -0,0 +1,245 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "errors" + "net/http" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Admission Webhook Response Helpers", func() { + Describe("Allowed", func() { + It("should return an 'allowed' response", func() { + Expect(Allowed("")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + }, + )) + }) + + It("should populate a status with a reason when a reason is given", func() { + Expect(Allowed("acceptable")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + Reason: "acceptable", + }, + }, + }, + )) + }) + }) + + Describe("Denied", func() { + It("should return a 'not allowed' response", func() { + Expect(Denied("")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: http.StatusForbidden, + }, + }, + }, + )) + }) + + It("should populate a status with a reason when a reason is given", func() { + Expect(Denied("UNACCEPTABLE!")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: http.StatusForbidden, + Reason: "UNACCEPTABLE!", + }, + }, + }, + )) + }) + }) + + Describe("Patched", func() { + ops := []jsonpatch.JsonPatchOperation{ + { + Operation: "replace", + Path: "/spec/selector/matchLabels", + Value: map[string]string{"foo": "bar"}, + }, + { + Operation: "delete", + Path: "/spec/replicas", + }, + } + It("should return an 'allowed' response with the given patches", func() { + Expect(Patched("", ops...)).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + Patches: ops, + }, + )) + }) + It("should populate a status with a reason when a reason is given", func() { + Expect(Patched("some changes", ops...)).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + Reason: "some changes", + }, + }, + Patches: ops, + }, + )) + }) + }) + + Describe("Errored", func() { + It("should return a denied response with an error", func() { + err := errors.New("this is an error") + expected := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: http.StatusBadRequest, + Message: err.Error(), + }, + }, + } + resp := Errored(http.StatusBadRequest, err) + Expect(resp).To(Equal(expected)) + }) + }) + + Describe("ValidationResponse", func() { + It("should populate a status with a reason when a reason is given", func() { + By("checking that a message is populated for 'allowed' responses") + Expect(ValidationResponse(true, "acceptable")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + Reason: "acceptable", + }, + }, + }, + )) + + By("checking that a message is populated for 'denied' responses") + Expect(ValidationResponse(false, "UNACCEPTABLE!")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: http.StatusForbidden, + Reason: "UNACCEPTABLE!", + }, + }, + }, + )) + }) + + It("should return an admission decision", func() { + By("checking that it returns an 'allowed' response when 
allowed is true") + Expect(ValidationResponse(true, "")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + }, + )) + + By("checking that it returns an 'denied' response when allowed is false") + Expect(ValidationResponse(false, "")).To(Equal( + Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: http.StatusForbidden, + }, + }, + }, + )) + }) + }) + + Describe("PatchResponseFromRaw", func() { + It("should return an 'allowed' response with a patch of the diff between two sets of serialized JSON", func() { + expected := Response{ + Patches: []jsonpatch.JsonPatchOperation{ + {Operation: "replace", Path: "/a", Value: "bar"}, + }, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } + resp := PatchResponseFromRaw([]byte(`{"a": "foo"}`), []byte(`{"a": "bar"}`)) + Expect(resp).To(Equal(expected)) + }) + }) + + Describe("WithWarnings", func() { + It("should add the warnings to the existing response without removing any existing warnings", func() { + initialResponse := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Warnings: []string{"existing-warning"}, + }, + } + warnings := []string{"additional-warning-1", "additional-warning-2"} + expectedResponse := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Warnings: []string{"existing-warning", "additional-warning-1", "additional-warning-2"}, + }, + } + + Expect(initialResponse.WithWarnings(warnings...)).To(Equal(expectedResponse)) + }) + }) +}) diff --git a/pkg/webhook/admission/validator.go b/pkg/webhook/admission/validator.go new file mode 100644 index 0000000000..4b27e75ede --- /dev/null +++ b/pkg/webhook/admission/validator.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + goerrors "errors" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// Validator defines functions for validating an operation. +type Validator interface { + runtime.Object + ValidateCreate() error + ValidateUpdate(old runtime.Object) error + ValidateDelete() error +} + +// ValidatingWebhookFor creates a new Webhook for validating the provided type. +func ValidatingWebhookFor(validator Validator) *Webhook { + return &Webhook{ + Handler: &validatingHandler{validator: validator}, + } +} + +type validatingHandler struct { + validator Validator + decoder *Decoder +} + +var _ DecoderInjector = &validatingHandler{} + +// InjectDecoder injects the decoder into a validatingHandler. 
+func (h *validatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + + // Get the object in the request + obj := h.validator.DeepCopyObject().(Validator) + if req.Operation == v1.Create { + err := h.decoder.Decode(req, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateCreate() + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + if req.Operation == v1.Update { + oldObj := obj.DeepCopyObject() + + err := h.decoder.DecodeRaw(req.Object, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + err = h.decoder.DecodeRaw(req.OldObject, oldObj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateUpdate(oldObj) + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + if req.Operation == v1.Delete { + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + err := h.decoder.DecodeRaw(req.OldObject, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateDelete() + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + return Allowed("") +} diff --git a/pkg/webhook/admission/validator_custom.go b/pkg/webhook/admission/validator_custom.go new file mode 100644 index 0000000000..33252f1134 --- /dev/null +++ b/pkg/webhook/admission/validator_custom.go @@ -0,0 +1,113 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "errors" + "fmt" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// CustomValidator defines functions for validating an operation. +type CustomValidator interface { + ValidateCreate(ctx context.Context, obj runtime.Object) error + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error + ValidateDelete(ctx context.Context, obj runtime.Object) error +} + +// WithCustomValidator creates a new Webhook for validating the provided type. 
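CustomValidator mirrors Validator but takes the context and a plain runtime.Object, which keeps webhook logic out of the API types themselves. A minimal sketch against the core Pod type, wired through the WithCustomValidator constructor defined just below; the validation rule is illustrative.

    package validation

    import (
        "context"
        "errors"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/runtime"

        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    type podValidator struct{}

    var _ admission.CustomValidator = &podValidator{}

    func (v *podValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error {
        pod, ok := obj.(*corev1.Pod)
        if !ok {
            return fmt.Errorf("expected a Pod but got a %T", obj)
        }
        if len(pod.Spec.Containers) == 0 {
            return errors.New("a Pod must declare at least one container")
        }
        return nil
    }

    func (v *podValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
        return v.ValidateCreate(ctx, newObj)
    }

    func (v *podValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
        return nil
    }

    // hook := admission.WithCustomValidator(&corev1.Pod{}, &podValidator{})

Returning a plain error yields a Denied response with the error text as the reason, while returning an apierrors.StatusError propagates its Status (code and message) to the API server, matching the handler logic above.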
+func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { + return &Webhook{ + Handler: &validatorForType{object: obj, validator: validator}, + } +} + +type validatorForType struct { + validator CustomValidator + object runtime.Object + decoder *Decoder +} + +var _ DecoderInjector = &validatorForType{} + +// InjectDecoder injects the decoder into a validatingHandler. +func (h *validatorForType) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + + var err error + switch req.Operation { + case v1.Create: + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateCreate(ctx, obj) + case v1.Update: + oldObj := obj.DeepCopyObject() + if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + if err := h.decoder.DecodeRaw(req.OldObject, oldObj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateUpdate(ctx, oldObj, obj) + case v1.Delete: + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + if err := h.decoder.DecodeRaw(req.OldObject, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateDelete(ctx, obj) + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + } + + // Check the error message first. + if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Return allowed if everything succeeded. + return Allowed("") +} diff --git a/pkg/webhook/admission/validator_test.go b/pkg/webhook/admission/validator_test.go new file mode 100644 index 0000000000..7fe19268d9 --- /dev/null +++ b/pkg/webhook/admission/validator_test.go @@ -0,0 +1,240 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + goerrors "errors" + "net/http" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission/admissiontest" + + admissionv1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" +) + +var fakeValidatorVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "fakeValidator"} + +var _ = Describe("validatingHandler", func() { + + decoder, _ := NewDecoder(scheme.Scheme) + + Context("when dealing with successful results", func() { + + f := &admissiontest.FakeValidator{ErrorToReturn: nil, GVKToReturn: fakeValidatorVK} + handler := validatingHandler{validator: f, decoder: decoder} + + It("should return 200 in response when create succeeds", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + It("should return 200 in response when update succeeds", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + It("should return 200 in response when delete succeeds", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + }) + + Context("when dealing with Status errors", func() { + + expectedError := &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Message: "some message", + Code: http.StatusUnprocessableEntity, + }, + } + f := &admissiontest.FakeValidator{ErrorToReturn: expectedError, GVKToReturn: fakeValidatorVK} + handler := validatingHandler{validator: f, decoder: decoder} + + It("should propagate the Status from ValidateCreate's return value to the HTTP response", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + + }) + + It("should propagate the Status from ValidateUpdate's return value to the HTTP response", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + 
Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + + }) + + It("should propagate the Status from ValidateDelete's return value to the HTTP response", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + + }) + + }) + Context("when dealing with non-status errors", func() { + + expectedError := goerrors.New("some error") + f := &admissiontest.FakeValidator{ErrorToReturn: expectedError, GVKToReturn: fakeValidatorVK} + handler := validatingHandler{validator: f, decoder: decoder} + + It("should return 403 response when ValidateCreate with error message embedded", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(string(response.Result.Reason)).Should(Equal(expectedError.Error())) + + }) + + It("should return 403 response when ValidateUpdate returns non-APIStatus error", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(string(response.Result.Reason)).Should(Equal(expectedError.Error())) + + }) + + It("should return 403 response when ValidateDelete returns non-APIStatus error", func() { + + response := handler.Handle(context.TODO(), Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: handler.validator, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(string(response.Result.Reason)).Should(Equal(expectedError.Error())) + + }) + + }) + + PIt("should return 400 in response when create fails on decode", func() {}) + + PIt("should return 400 in response when update fails on decoding new object", func() {}) + + PIt("should return 400 in response when update fails on decoding old object", func() {}) + + PIt("should return 400 in response when delete fails on decode", func() {}) + +}) diff --git a/pkg/webhook/admission/webhook.go b/pkg/webhook/admission/webhook.go new file mode 100644 index 0000000000..d10b97dddb --- /dev/null +++ b/pkg/webhook/admission/webhook.go @@ -0,0 +1,296 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package admission
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/go-logr/logr"
+	jsonpatch "gomodules.xyz/jsonpatch/v2"
+	admissionv1 "k8s.io/api/admission/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+
+	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics"
+)
+
+var (
+	errUnableToEncodeResponse = errors.New("unable to encode response")
+)
+
+// Request defines the input for an admission handler.
+// It contains information to identify the object in
+// question (group, version, kind, resource, subresource,
+// name, namespace), as well as the operation in question
+// (e.g. Get, Create, etc), and the object itself.
+type Request struct {
+	admissionv1.AdmissionRequest
+}
+
+// Response is the output of an admission handler.
+// It contains a response indicating if a given
+// operation is allowed, as well as a set of patches
+// to mutate the object in the case of a mutating admission handler.
+type Response struct {
+	// Patches are the JSON patches for mutating webhooks.
+	// Using this instead of setting Response.Patch to minimize
+	// overhead of serialization and deserialization.
+	// Patches set here will override any patches in the response,
+	// so leave this empty if you want to set the patch response directly.
+	Patches []jsonpatch.JsonPatchOperation
+	// AdmissionResponse is the raw admission response.
+	// The Patch field in it will be overwritten by the listed patches.
+	admissionv1.AdmissionResponse
+}
+
+// Complete populates any fields that are yet to be set in
+// the underlying AdmissionResponse. It mutates the response.
+func (r *Response) Complete(req Request) error {
+	r.UID = req.UID
+
+	// ensure that we have a valid status code
+	if r.Result == nil {
+		r.Result = &metav1.Status{}
+	}
+	if r.Result.Code == 0 {
+		r.Result.Code = http.StatusOK
+	}
+	// TODO(directxman12): do we need to populate this further, and/or
+	// is code actually necessary (the same webhook doesn't use it)
+
+	if len(r.Patches) == 0 {
+		return nil
+	}
+
+	var err error
+	r.Patch, err = json.Marshal(r.Patches)
+	if err != nil {
+		return err
+	}
+	patchType := admissionv1.PatchTypeJSONPatch
+	r.PatchType = &patchType
+
+	return nil
+}
+
+// Handler can handle an AdmissionRequest.
+type Handler interface {
+	// Handle yields a response to an AdmissionRequest.
+	//
+	// The supplied context is extracted from the received http.Request, allowing wrapping
+	// http.Handlers to inject values into and control cancelation of downstream request processing.
+	Handle(context.Context, Request) Response
+}
+
+// HandlerFunc implements the Handler interface using a single function.
+type HandlerFunc func(context.Context, Request) Response
+
+var _ Handler = HandlerFunc(nil)
+
+// Handle processes the AdmissionRequest by invoking the underlying function.
+func (f HandlerFunc) Handle(ctx context.Context, req Request) Response {
+	return f(ctx, req)
+}
+
+// Webhook represents each individual webhook.
+//
+// It must be registered with a webhook.Server or
+// populated by StandaloneWebhook to be run on an arbitrary HTTP server.
+type Webhook struct {
+	// Handler actually processes an admission request, returning whether it was allowed or denied,
+	// and potentially patches to apply to the object.
+	Handler Handler
+
+	// RecoverPanic indicates whether a panic caused by the webhook should be recovered.
+	RecoverPanic bool
+
+	// WithContextFunc will allow you to take the http.Request.Context() and
+	// add any additional information such as passing the request path or
+	// headers, thus allowing you to read them from within the handler.
+	WithContextFunc func(context.Context, *http.Request) context.Context
+
+	// decoder is constructed on receiving a scheme and passed down to the handler
+	decoder *Decoder
+
+	log logr.Logger
+}
+
+// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook.
+func (wh *Webhook) InjectLogger(l logr.Logger) error {
+	wh.log = l
+	return nil
+}
+
+// WithRecoverPanic takes a bool flag which indicates whether a panic caused by the webhook should be recovered.
+func (wh *Webhook) WithRecoverPanic(recoverPanic bool) *Webhook {
+	wh.RecoverPanic = recoverPanic
+	return wh
+}
+
+// Handle processes an AdmissionRequest.
+// If the webhook is a mutating type, it delegates the AdmissionRequest to each handler and merges the patches.
+// If the webhook is a validating type, it delegates the AdmissionRequest to each handler and
+// denies the request if any handler denies it.
+func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) {
+	if wh.RecoverPanic {
+		defer func() {
+			if r := recover(); r != nil {
+				for _, fn := range utilruntime.PanicHandlers {
+					fn(r)
+				}
+				response = Errored(http.StatusInternalServerError, fmt.Errorf("panic: %v [recovered]", r))
+				return
+			}
+		}()
+	}
+
+	resp := wh.Handler.Handle(ctx, req)
+	if err := resp.Complete(req); err != nil {
+		wh.log.Error(err, "unable to encode response")
+		return Errored(http.StatusInternalServerError, errUnableToEncodeResponse)
+	}
+
+	return resp
+}
+
+// InjectScheme injects a scheme into the webhook, in order to construct a Decoder.
+func (wh *Webhook) InjectScheme(s *runtime.Scheme) error {
+	// TODO(directxman12): we should have a better way to pass this down
+
+	var err error
+	wh.decoder, err = NewDecoder(s)
+	if err != nil {
+		return err
+	}
+
+	// inject the decoder here too, just in case the order of calling this is not
+	// scheme first, then inject func
+	if wh.Handler != nil {
+		if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetDecoder returns a decoder to decode the objects embedded in admission requests.
+// It may be nil if we haven't received a scheme to use to determine object types yet.
+func (wh *Webhook) GetDecoder() *Decoder {
+	return wh.decoder
+}
+
+// InjectFunc injects the field setter into the webhook.
+func (wh *Webhook) InjectFunc(f inject.Func) error {
+	// inject directly into the handlers. It would be more correct
+	// to do this in a sync.Once in Handle (since we don't have some
+	// other start/finalize-type method), but it's more efficient to
+	// do it here, presumably.
+ + // also inject a decoder, and wrap this so that we get a setFields + // that injects a decoder (hopefully things don't ignore the duplicate + // InjectorInto call). + + var setFields inject.Func + setFields = func(target interface{}) error { + if err := f(target); err != nil { + return err + } + + if _, err := inject.InjectorInto(setFields, target); err != nil { + return err + } + + if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { + return err + } + + return nil + } + + return setFields(wh.Handler) +} + +// StandaloneOptions let you configure a StandaloneWebhook. +type StandaloneOptions struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + // Logger to be used by the webhook. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + // MetricsPath is used for labelling prometheus metrics + // by the path is served on. + // If none is set, prometheus metrics will not be generated. + MetricsPath string +} + +// StandaloneWebhook prepares a webhook for use without a webhook.Server, +// passing in the information normally populated by webhook.Server +// and instrumenting the webhook with metrics. +// +// Use this to attach your webhook to an arbitrary HTTP server or mux. +// +// Note that you are responsible for terminating TLS if you use StandaloneWebhook +// in your own server/mux. In order to be accessed by a kubernetes cluster, +// all webhook servers require TLS. +func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + if err := hook.InjectScheme(opts.Scheme); err != nil { + return nil, err + } + + if opts.Logger.GetSink() == nil { + opts.Logger = logf.RuntimeLog.WithName("webhook") + } + hook.log = opts.Logger + + if opts.MetricsPath == "" { + return hook, nil + } + return metrics.InstrumentedHook(opts.MetricsPath, hook), nil +} + +// requestContextKey is how we find the admission.Request in a context.Context. +type requestContextKey struct{} + +// RequestFromContext returns an admission.Request from ctx. +func RequestFromContext(ctx context.Context) (Request, error) { + if v, ok := ctx.Value(requestContextKey{}).(Request); ok { + return v, nil + } + + return Request{}, errors.New("admission.Request not found in context") +} + +// NewContextWithRequest returns a new Context, derived from ctx, which carries the +// provided admission.Request. +func NewContextWithRequest(ctx context.Context, req Request) context.Context { + return context.WithValue(ctx, requestContextKey{}, req) +} diff --git a/pkg/webhook/admission/webhook_test.go b/pkg/webhook/admission/webhook_test.go new file mode 100644 index 0000000000..6a8570808a --- /dev/null +++ b/pkg/webhook/admission/webhook_test.go @@ -0,0 +1,287 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
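StandaloneWebhook is the path for serving an admission.Webhook without a webhook.Server. A minimal sketch of mounting one on a plain http.ServeMux follows, under stated assumptions: the handler simply allows everything, and the certificate paths are placeholders for files you already provision.

    package main

    import (
        "context"
        "net/http"

        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    func main() {
        hook := &admission.Webhook{
            Handler: admission.HandlerFunc(func(ctx context.Context, req admission.Request) admission.Response {
                return admission.Allowed("")
            }),
        }

        // StandaloneWebhook injects the scheme-backed decoder and, when MetricsPath is set,
        // wraps the hook with the instrumented handler.
        handler, err := admission.StandaloneWebhook(hook, admission.StandaloneOptions{
            MetricsPath: "/validate-example",
        })
        if err != nil {
            panic(err)
        }

        mux := http.NewServeMux()
        mux.Handle("/validate-example", handler)

        // TLS termination is the caller's responsibility here; the API server only calls webhooks over HTTPS.
        if err := http.ListenAndServeTLS(":9443", "tls.crt", "tls.key", mux); err != nil {
            panic(err)
        }
    }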
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "net/http" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + machinerytypes "k8s.io/apimachinery/pkg/types" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ = Describe("Admission Webhooks", func() { + allowHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + }, + } + }, + } + webhook := &Webhook{ + Handler: handler, + log: logf.RuntimeLog.WithName("webhook"), + } + + return webhook + } + + It("should invoke the handler to get a response", func() { + By("setting up a webhook with an allow handler") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that it allowed the request") + Expect(resp.Allowed).To(BeTrue()) + }) + + It("should ensure that the response's UID is set to the request's UID", func() { + By("setting up a webhook") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{AdmissionRequest: admissionv1.AdmissionRequest{UID: "foobar"}}) + + By("checking that the response share's the request's UID") + Expect(resp.UID).To(Equal(machinerytypes.UID("foobar"))) + }) + + It("should populate the status on a response if one is not provided", func() { + By("setting up a webhook") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that the response share's the request's UID") + Expect(resp.Result).To(Equal(&metav1.Status{Code: http.StatusOK})) + }) + + It("shouldn't overwrite the status on a response", func() { + By("setting up a webhook that sets a status") + webhook := &Webhook{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{Message: "Ground Control to Major Tom"}, + }, + } + }), + log: logf.RuntimeLog.WithName("webhook"), + } + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that the message is intact") + Expect(resp.Result).NotTo(BeNil()) + Expect(resp.Result.Message).To(Equal("Ground Control to Major Tom")) + }) + + It("should serialize patch operations into a single jsonpatch blob", func() { + By("setting up a webhook with a patching handler") + webhook := &Webhook{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + return Patched("", jsonpatch.Operation{Operation: "add", Path: "/a", Value: 2}, jsonpatch.Operation{Operation: "replace", Path: "/b", Value: 4}) + }), + log: logf.RuntimeLog.WithName("webhook"), + } + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that a JSON patch is populated on the response") + patchType := admissionv1.PatchTypeJSONPatch + Expect(resp.PatchType).To(Equal(&patchType)) + Expect(resp.Patch).To(Equal([]byte(`[{"op":"add","path":"/a","value":2},{"op":"replace","path":"/b","value":4}]`))) + }) + + Describe("dependency 
injection", func() { + It("should set dependencies passed in on the handler", func() { + By("setting up a webhook and injecting it with a injection func that injects a string") + setFields := func(target interface{}) error { + inj, ok := target.(stringInjector) + if !ok { + return nil + } + + return inj.InjectString("something") + } + handler := &fakeHandler{} + webhook := &Webhook{ + Handler: handler, + log: logf.RuntimeLog.WithName("webhook"), + } + Expect(setFields(webhook)).To(Succeed()) + Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) + + By("checking that the string was injected") + Expect(handler.injectedString).To(Equal("something")) + }) + + It("should inject a decoder into the handler", func() { + By("setting up a webhook and injecting it with a injection func that injects a scheme") + setFields := func(target interface{}) error { + if _, err := inject.SchemeInto(runtime.NewScheme(), target); err != nil { + return err + } + return nil + } + handler := &fakeHandler{} + webhook := &Webhook{ + Handler: handler, + log: logf.RuntimeLog.WithName("webhook"), + } + Expect(setFields(webhook)).To(Succeed()) + Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) + + By("checking that the decoder was injected") + Expect(handler.decoder).NotTo(BeNil()) + }) + + It("should pass a setFields that also injects a decoder into sub-dependencies", func() { + By("setting up a webhook and injecting it with a injection func that injects a scheme") + setFields := func(target interface{}) error { + if _, err := inject.SchemeInto(runtime.NewScheme(), target); err != nil { + return err + } + return nil + } + handler := &handlerWithSubDependencies{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + return Response{} + }), + dep: &subDep{}, + } + webhook := &Webhook{ + Handler: handler, + } + Expect(setFields(webhook)).To(Succeed()) + Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) + + By("checking that setFields sets the decoder as well") + Expect(handler.dep.decoder).NotTo(BeNil()) + }) + }) + + Describe("panic recovery", func() { + It("should recover panic if RecoverPanic is true", func() { + panicHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + panic("injected panic") + }, + } + webhook := &Webhook{ + Handler: handler, + RecoverPanic: true, + log: logf.RuntimeLog.WithName("webhook"), + } + + return webhook + } + + By("setting up a webhook with a panicking handler") + webhook := panicHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that it errored the request") + Expect(resp.Allowed).To(BeFalse()) + Expect(resp.Result.Code).To(Equal(int32(http.StatusInternalServerError))) + Expect(resp.Result.Message).To(Equal("panic: injected panic [recovered]")) + }) + + It("should not recover panic if RecoverPanic is false by default", func() { + panicHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + panic("injected panic") + }, + } + webhook := &Webhook{ + Handler: handler, + log: logf.RuntimeLog.WithName("webhook"), + } + + return webhook + } + + By("setting up a webhook with a panicking handler") + defer func() { + Expect(recover()).ShouldNot(BeNil()) + }() + webhook := panicHandler() + + By("invoking the webhook") + webhook.Handle(context.Background(), Request{}) + }) + }) +}) + +var _ = Describe("Should be able to write/read admission.Request to/from context", 
func() { + ctx := context.Background() + testRequest := Request{ + admissionv1.AdmissionRequest{ + UID: "test-uid", + }, + } + + ctx = NewContextWithRequest(ctx, testRequest) + + gotRequest, err := RequestFromContext(ctx) + Expect(err).To(Not(HaveOccurred())) + Expect(gotRequest).To(Equal(testRequest)) +}) + +type stringInjector interface { + InjectString(s string) error +} + +type handlerWithSubDependencies struct { + Handler + dep *subDep +} + +func (h *handlerWithSubDependencies) InjectFunc(f inject.Func) error { + return f(h.dep) +} + +type subDep struct { + decoder *Decoder +} + +func (d *subDep) InjectDecoder(dec *Decoder) error { + d.decoder = dec + return nil +} diff --git a/pkg/webhook/alias.go b/pkg/webhook/alias.go new file mode 100644 index 0000000000..293137db49 --- /dev/null +++ b/pkg/webhook/alias.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "gomodules.xyz/jsonpatch/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// define some aliases for common bits of the webhook functionality + +// Defaulter defines functions for setting defaults on resources. +type Defaulter = admission.Defaulter + +// Validator defines functions for validating an operation. +type Validator = admission.Validator + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter = admission.CustomDefaulter + +// CustomValidator defines functions for validating an operation. +type CustomValidator = admission.CustomValidator + +// AdmissionRequest defines the input for an admission handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type AdmissionRequest = admission.Request + +// AdmissionResponse is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type AdmissionResponse = admission.Response + +// Admission is webhook suitable for registration with the server +// an admission webhook that validates API operations and potentially +// mutates their contents. +type Admission = admission.Webhook + +// AdmissionHandler knows how to process admission requests, validating them, +// and potentially mutating the objects they contain. +type AdmissionHandler = admission.Handler + +// AdmissionDecoder knows how to decode objects from admission requests. +type AdmissionDecoder = admission.Decoder + +// JSONPatchOp represents a single JSONPatch patch operation. +type JSONPatchOp = jsonpatch.Operation + +var ( + // Allowed indicates that the admission request should be allowed for the given reason. + Allowed = admission.Allowed + + // Denied indicates that the admission request should be denied for the given reason. 
+ Denied = admission.Denied + + // Patched indicates that the admission request should be allowed for the given reason, + // and that the contained object should be mutated using the given patches. + Patched = admission.Patched + + // Errored indicates that an error occurred in the admission request. + Errored = admission.Errored +) diff --git a/pkg/webhook/authentication/authentication_suite_test.go b/pkg/webhook/authentication/authentication_suite_test.go new file mode 100644 index 0000000000..b993d1ef80 --- /dev/null +++ b/pkg/webhook/authentication/authentication_suite_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestAuthenticationWebhook(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Authentication Webhook Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}, 60) diff --git a/pkg/webhook/authentication/doc.go b/pkg/webhook/authentication/doc.go new file mode 100644 index 0000000000..a1b45c1aef --- /dev/null +++ b/pkg/webhook/authentication/doc.go @@ -0,0 +1,29 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package authentication provides implementation for authentication webhook and +methods to implement authentication webhook handlers. + +See examples/tokenreview/ for an example of authentication webhooks. +*/ +package authentication + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("authentication") diff --git a/pkg/webhook/authentication/http.go b/pkg/webhook/authentication/http.go new file mode 100644 index 0000000000..59832e8a07 --- /dev/null +++ b/pkg/webhook/authentication/http.go @@ -0,0 +1,148 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
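A hedged sketch of writing an admission handler purely against the aliases that pkg/webhook/alias.go re-exports earlier in this change; the size check, the registration path, the package name, and the webhook.Server used for registration (defined elsewhere in the package) are illustrative.

package examplewebhooks

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// denyLargeObjects rejects requests whose raw object exceeds one MiB.
func denyLargeObjects(ctx context.Context, req webhook.AdmissionRequest) webhook.AdmissionResponse {
	if len(req.Object.Raw) > 1<<20 {
		return webhook.Denied("object larger than 1MiB")
	}
	return webhook.Allowed("")
}

// register mounts the handler on a webhook.Server, e.g. the one returned by
// a manager's GetWebhookServer().
func register(s *webhook.Server) {
	// Admission aliases admission.Webhook, so it can be registered directly.
	s.Register("/validate-size", &webhook.Admission{Handler: admission.HandlerFunc(denyLargeObjects)})
}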
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var authenticationScheme = runtime.NewScheme() +var authenticationCodecs = serializer.NewCodecFactory(authenticationScheme) + +func init() { + utilruntime.Must(authenticationv1.AddToScheme(authenticationScheme)) + utilruntime.Must(authenticationv1beta1.AddToScheme(authenticationScheme)) +} + +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var body []byte + var err error + ctx := r.Context() + if wh.WithContextFunc != nil { + ctx = wh.WithContextFunc(ctx, r) + } + + var reviewResponse Response + if r.Body == nil { + err = errors.New("request body is empty") + wh.log.Error(err, "bad request") + reviewResponse = Errored(err) + wh.writeResponse(w, reviewResponse) + return + } + + defer r.Body.Close() + if body, err = io.ReadAll(r.Body); err != nil { + wh.log.Error(err, "unable to read the body from the incoming request") + reviewResponse = Errored(err) + wh.writeResponse(w, reviewResponse) + return + } + + // verify the content type is accurate + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { + err = fmt.Errorf("contentType=%s, expected application/json", contentType) + wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + reviewResponse = Errored(err) + wh.writeResponse(w, reviewResponse) + return + } + + // Both v1 and v1beta1 TokenReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. The v1beta1 api is deprecated as of 1.19 and will be + // removed in authenticationv1.22. However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 TokenReview to authenticationv1. + // The actual TokenReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. 
+ req := Request{} + ar := unversionedTokenReview{} + // avoid an extra copy + ar.TokenReview = &req.TokenReview + ar.SetGroupVersionKind(authenticationv1.SchemeGroupVersion.WithKind("TokenReview")) + _, actualTokRevGVK, err := authenticationCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { + wh.log.Error(err, "unable to decode the request") + reviewResponse = Errored(err) + wh.writeResponse(w, reviewResponse) + return + } + wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind) + + if req.Spec.Token == "" { + err = errors.New("token is empty") + wh.log.Error(err, "bad request") + reviewResponse = Errored(err) + wh.writeResponse(w, reviewResponse) + return + } + + reviewResponse = wh.Handle(ctx, req) + wh.writeResponseTyped(w, reviewResponse, actualTokRevGVK) +} + +// writeResponse writes response to w generically, i.e. without encoding GVK information. +func (wh *Webhook) writeResponse(w io.Writer, response Response) { + wh.writeTokenResponse(w, response.TokenReview) +} + +// writeResponseTyped writes response to w with GVK set to tokRevGVK, which is necessary +// if multiple TokenReview versions are permitted by the webhook. +func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, tokRevGVK *schema.GroupVersionKind) { + ar := response.TokenReview + + // Default to a v1 TokenReview, otherwise the API server may not recognize the request + // if multiple TokenReview versions are permitted by the webhook config. + if tokRevGVK == nil || *tokRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(authenticationv1.SchemeGroupVersion.WithKind("TokenReview")) + } else { + ar.SetGroupVersionKind(*tokRevGVK) + } + wh.writeTokenResponse(w, ar) +} + +// writeTokenResponse writes ar to w. +func (wh *Webhook) writeTokenResponse(w io.Writer, ar authenticationv1.TokenReview) { + if err := json.NewEncoder(w).Encode(ar); err != nil { + wh.log.Error(err, "unable to encode the response") + wh.writeResponse(w, Errored(err)) + } + res := ar + if log := wh.log; log.V(1).Enabled() { + log.V(1).Info("wrote response", "UID", res.UID, "authenticated", res.Status.Authenticated) + } +} + +// unversionedTokenReview is used to decode both v1 and v1beta1 TokenReview types. +type unversionedTokenReview struct { + *authenticationv1.TokenReview +} + +var _ runtime.Object = &unversionedTokenReview{} diff --git a/pkg/webhook/authentication/http_test.go b/pkg/webhook/authentication/http_test.go new file mode 100644 index 0000000000..882a77e427 --- /dev/null +++ b/pkg/webhook/authentication/http_test.go @@ -0,0 +1,223 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + authenticationv1 "k8s.io/api/authentication/v1" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ = Describe("Authentication Webhooks", func() { + + const ( + gvkJSONv1 = `"kind":"TokenReview","apiVersion":"authentication.k8s.io/v1"` + ) + + Describe("HTTP Handler", func() { + var respRecorder *httptest.ResponseRecorder + webhook := &Webhook{ + Handler: nil, + } + BeforeEach(func() { + respRecorder = &httptest.ResponseRecorder{ + Body: bytes.NewBuffer(nil), + } + _, err := inject.LoggerInto(log.WithName("test-webhook"), webhook) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return bad-request when given an empty body", func() { + req := &http.Request{Body: nil} + + expected := `{"metadata":{"creationTimestamp":null},"spec":{},"status":{"user":{},"error":"request body is empty"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given the wrong content-type", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/foo"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBuffer(nil)}, + } + + expected := `{"metadata":{"creationTimestamp":null},"spec":{},"status":{"user":{},"error":"contentType=application/foo, expected application/json"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given an undecodable body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString("{")}, + } + + expected := `{"metadata":{"creationTimestamp":null},"spec":{},"status":{"user":{},"error":"couldn't get version/kind; json parse error: unexpected end of JSON input"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given an undecodable body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":""}}`)}, + } + + expected := `{"metadata":{"creationTimestamp":null},"spec":{},"status":{"user":{},"error":"token is empty"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the response given by the handler with version defaulted to v1", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":"foobar"}}`)}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"metadata":{"creationTimestamp":null},"spec":{},"status":{"authenticated":true,"user":{}}} +`, gvkJSONv1) + + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the v1 response given by the handler", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"spec":{"token":"foobar"}}`, gvkJSONv1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + log: 
logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"metadata":{"creationTimestamp":null},"spec":{},"status":{"authenticated":true,"user":{}}} +`, gvkJSONv1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should present the Context from the HTTP request, if any", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":"foobar"}}`)}, + } + type ctxkey int + const key ctxkey = 1 + const value = "from-ctx" + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + <-ctx.Done() + return Authenticated(ctx.Value(key).(string), authenticationv1.UserInfo{}) + }, + }, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"metadata":{"creationTimestamp":null},"spec":{},"status":{"authenticated":true,"user":{},"error":%q}} +`, gvkJSONv1, value) + + ctx, cancel := context.WithCancel(context.WithValue(context.Background(), key, value)) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should mutate the Context from the HTTP request, if func supplied", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":"foobar"}}`)}, + } + type ctxkey int + const key ctxkey = 1 + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Authenticated(ctx.Value(key).(string), authenticationv1.UserInfo{}) + }, + }, + WithContextFunc: func(ctx context.Context, r *http.Request) context.Context { + return context.WithValue(ctx, key, r.Header["Content-Type"][0]) + }, + log: logf.RuntimeLog.WithName("webhook"), + } + + expected := fmt.Sprintf(`{%s,"metadata":{"creationTimestamp":null},"spec":{},"status":{"authenticated":true,"user":{},"error":%q}} +`, gvkJSONv1, "application/json") + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + }) +}) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +type fakeHandler struct { + invoked bool + fn func(context.Context, Request) Response + injectedString string +} + +func (h *fakeHandler) InjectString(s string) error { + h.injectedString = s + return nil +} + +func (h *fakeHandler) Handle(ctx context.Context, req Request) Response { + h.invoked = true + if h.fn != nil { + return h.fn(ctx, req) + } + return Response{TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + }, + }} +} diff --git a/pkg/webhook/authentication/response.go b/pkg/webhook/authentication/response.go new file mode 100644 index 0000000000..3e1d362049 --- /dev/null +++ b/pkg/webhook/authentication/response.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + authenticationv1 "k8s.io/api/authentication/v1" +) + +// Authenticated constructs a response indicating that the given token +// is valid. +func Authenticated(reason string, user authenticationv1.UserInfo) Response { + return ReviewResponse(true, user, reason) +} + +// Unauthenticated constructs a response indicating that the given token +// is not valid. +func Unauthenticated(reason string, user authenticationv1.UserInfo) Response { + return ReviewResponse(false, authenticationv1.UserInfo{}, reason) +} + +// Errored creates a new Response for error-handling a request. +func Errored(err error) Response { + return Response{ + TokenReview: authenticationv1.TokenReview{ + Spec: authenticationv1.TokenReviewSpec{}, + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + Error: err.Error(), + }, + }, + } +} + +// ReviewResponse returns a response for admitting a request. +func ReviewResponse(authenticated bool, user authenticationv1.UserInfo, err string, audiences ...string) Response { + resp := Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: authenticated, + User: user, + Audiences: audiences, + }, + }, + } + if len(err) > 0 { + resp.TokenReview.Status.Error = err + } + return resp +} diff --git a/pkg/webhook/authentication/response_test.go b/pkg/webhook/authentication/response_test.go new file mode 100644 index 0000000000..22c1ee3ba7 --- /dev/null +++ b/pkg/webhook/authentication/response_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . 
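A hedged sketch of a Handler built on the response helpers defined just above: it compares the presented token against a single expected value. The token source, the reported username, and the package name are illustrative, and the webhook's logger is left to be injected by whatever server hosts it.

package example

import (
	"context"
	"os"

	authenticationv1 "k8s.io/api/authentication/v1"

	"sigs.k8s.io/controller-runtime/pkg/webhook/authentication"
)

// tokenChecker accepts exactly one expected token and rejects everything else.
type tokenChecker struct {
	expected string
}

func (t *tokenChecker) Handle(ctx context.Context, req authentication.Request) authentication.Response {
	if t.expected != "" && req.Spec.Token == t.expected {
		return authentication.Authenticated("static token matched", authenticationv1.UserInfo{
			Username: "example-user", // illustrative identity
		})
	}
	return authentication.Unauthenticated("token did not match", authenticationv1.UserInfo{})
}

// NewWebhook wires the handler into an authentication.Webhook; the hosting
// server is expected to inject a logger before serving it.
func NewWebhook() *authentication.Webhook {
	return &authentication.Webhook{
		Handler: &tokenChecker{expected: os.Getenv("EXPECTED_TOKEN")},
	}
}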
"github.com/onsi/gomega" + + authenticationv1 "k8s.io/api/authentication/v1" +) + +var _ = Describe("Authentication Webhook Response Helpers", func() { + Describe("Authenticated", func() { + It("should return an 'allowed' response", func() { + Expect(Authenticated("", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + }, + }, + }, + )) + }) + + It("should populate a status with a reason when a reason is given", func() { + Expect(Authenticated("acceptable", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + Error: "acceptable", + }, + }, + }, + )) + }) + }) + + Describe("Unauthenticated", func() { + It("should return a 'not allowed' response", func() { + Expect(Unauthenticated("", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: "", + }, + }, + }, + )) + }) + + It("should populate a status with a reason when a reason is given", func() { + Expect(Unauthenticated("UNACCEPTABLE!", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: "UNACCEPTABLE!", + }, + }, + }, + )) + }) + }) + + Describe("Errored", func() { + It("should return a unauthenticated response with an error", func() { + err := errors.New("this is an error") + expected := Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: err.Error(), + }, + }, + } + resp := Errored(err) + Expect(resp).To(Equal(expected)) + }) + }) + + Describe("ReviewResponse", func() { + It("should populate a status with a Error when a reason is given", func() { + By("checking that a message is populated for 'allowed' responses") + Expect(ReviewResponse(true, authenticationv1.UserInfo{}, "acceptable")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + Error: "acceptable", + }, + }, + }, + )) + + By("checking that a message is populated for 'Unauthenticated' responses") + Expect(ReviewResponse(false, authenticationv1.UserInfo{}, "UNACCEPTABLE!")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: "UNACCEPTABLE!", + }, + }, + }, + )) + }) + + It("should return an authentication decision", func() { + By("checking that it returns an 'allowed' response when allowed is true") + Expect(ReviewResponse(true, authenticationv1.UserInfo{}, "")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + }, + }, + }, + )) + + By("checking that it returns an 'Unauthenticated' response when allowed is false") + Expect(ReviewResponse(false, authenticationv1.UserInfo{}, "")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: 
authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + }, + }, + }, + )) + }) + }) +}) diff --git a/pkg/webhook/authentication/webhook.go b/pkg/webhook/authentication/webhook.go new file mode 100644 index 0000000000..b1229e422e --- /dev/null +++ b/pkg/webhook/authentication/webhook.go @@ -0,0 +1,129 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "context" + "errors" + "net/http" + + "github.com/go-logr/logr" + authenticationv1 "k8s.io/api/authentication/v1" + + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var ( + errUnableToEncodeResponse = errors.New("unable to encode response") +) + +// Request defines the input for an authentication handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type Request struct { + authenticationv1.TokenReview +} + +// Response is the output of an authentication handler. +// It contains a response indicating if a given +// operation is allowed. +type Response struct { + authenticationv1.TokenReview +} + +// Complete populates any fields that are yet to be set in +// the underlying TokenResponse, It mutates the response. +func (r *Response) Complete(req Request) error { + r.UID = req.UID + + return nil +} + +// Handler can handle an TokenReview. +type Handler interface { + // Handle yields a response to an TokenReview. + // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. + Handle(context.Context, Request) Response +} + +// HandlerFunc implements Handler interface using a single function. +type HandlerFunc func(context.Context, Request) Response + +var _ Handler = HandlerFunc(nil) + +// Handle process the TokenReview by invoking the underlying function. +func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { + return f(ctx, req) +} + +// Webhook represents each individual webhook. +type Webhook struct { + // Handler actually processes an authentication request returning whether it was authenticated or unauthenticated, + // and potentially patches to apply to the handler. + Handler Handler + + // WithContextFunc will allow you to take the http.Request.Context() and + // add any additional information such as passing the request path or + // headers thus allowing you to read them from within the handler + WithContextFunc func(context.Context, *http.Request) context.Context + + log logr.Logger +} + +// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. +func (wh *Webhook) InjectLogger(l logr.Logger) error { + wh.log = l + return nil +} + +// Handle processes TokenReview. 
+func (wh *Webhook) Handle(ctx context.Context, req Request) Response { + resp := wh.Handler.Handle(ctx, req) + if err := resp.Complete(req); err != nil { + wh.log.Error(err, "unable to encode response") + return Errored(errUnableToEncodeResponse) + } + + return resp +} + +// InjectFunc injects the field setter into the webhook. +func (wh *Webhook) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + + var setFields inject.Func + setFields = func(target interface{}) error { + if err := f(target); err != nil { + return err + } + + if _, err := inject.InjectorInto(setFields, target); err != nil { + return err + } + + return nil + } + + return setFields(wh.Handler) +} diff --git a/pkg/webhook/authentication/webhook_test.go b/pkg/webhook/authentication/webhook_test.go new file mode 100644 index 0000000000..55849ece32 --- /dev/null +++ b/pkg/webhook/authentication/webhook_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "context" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + machinerytypes "k8s.io/apimachinery/pkg/types" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ = Describe("Authentication Webhooks", func() { + allowHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + }, + }, + } + }, + } + webhook := &Webhook{ + Handler: handler, + log: logf.RuntimeLog.WithName("webhook"), + } + + return webhook + } + + It("should invoke the handler to get a response", func() { + By("setting up a webhook with an allow handler") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that it allowed the request") + Expect(resp.Status.Authenticated).To(BeTrue()) + }) + + It("should ensure that the response's UID is set to the request's UID", func() { + By("setting up a webhook") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{TokenReview: authenticationv1.TokenReview{ObjectMeta: metav1.ObjectMeta{UID: "foobar"}}}) + + By("checking that the response share's the request's UID") + Expect(resp.UID).To(Equal(machinerytypes.UID("foobar"))) + }) + + It("should populate the status on a response if one is not provided", func() { + By("setting up a webhook") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that the response share's the request's UID") + Expect(resp.Status).To(Equal(authenticationv1.TokenReviewStatus{Authenticated: true})) + }) + + It("shouldn't overwrite the status on a response", func() { + By("setting up a webhook that sets a status") + webhook := &Webhook{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + return Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + Error: "Ground Control to Major Tom", + }, + }, + } + }), + log: logf.RuntimeLog.WithName("webhook"), + } + + By("invoking the webhook") + resp := webhook.Handle(context.Background(), Request{}) + + By("checking that the message is intact") + Expect(resp.Status).NotTo(BeNil()) + Expect(resp.Status.Authenticated).To(BeTrue()) + Expect(resp.Status.Error).To(Equal("Ground Control to Major Tom")) + }) + + Describe("dependency injection", func() { + It("should set dependencies passed in on the handler", func() { + By("setting up a webhook and injecting it with a injection func that injects a string") + setFields := func(target interface{}) error { + inj, ok := target.(stringInjector) + if !ok { + return nil + } + + return inj.InjectString("something") + } + handler := &fakeHandler{} + webhook := &Webhook{ + Handler: handler, + log: logf.RuntimeLog.WithName("webhook"), + } + Expect(setFields(webhook)).To(Succeed()) + Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) + + By("checking that the string was injected") + Expect(handler.injectedString).To(Equal("something")) + }) + + }) +}) + +type stringInjector interface { + InjectString(s string) error +} diff --git a/pkg/webhook/conversion/conversion.go b/pkg/webhook/conversion/conversion.go new file mode 100644 index 0000000000..a5b7a282ce 
--- /dev/null +++ b/pkg/webhook/conversion/conversion.go @@ -0,0 +1,345 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides implementation for CRD conversion webhook that implements handler for version conversion requests for types that are convertible. + +See pkg/conversion for interface definitions required to ensure an API Type is convertible. +*/ +package conversion + +import ( + "encoding/json" + "fmt" + "net/http" + + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + log = logf.Log.WithName("conversion-webhook") +) + +// Webhook implements a CRD conversion webhook HTTP handler. +type Webhook struct { + scheme *runtime.Scheme + decoder *Decoder +} + +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. +func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + var err error + wh.scheme = s + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + return nil +} + +// ensure Webhook implements http.Handler +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + convertReview := &apix.ConversionReview{} + err := json.NewDecoder(r.Body).Decode(convertReview) + if err != nil { + log.Error(err, "failed to read conversion request") + w.WriteHeader(http.StatusBadRequest) + return + } + + // TODO(droot): may be move the conversion logic to a separate module to + // decouple it from the http layer ? + resp, err := wh.handleConvertRequest(convertReview.Request) + if err != nil { + log.Error(err, "failed to convert", "request", convertReview.Request.UID) + convertReview.Response = errored(err) + } else { + convertReview.Response = resp + } + convertReview.Response.UID = convertReview.Request.UID + convertReview.Request = nil + + err = json.NewEncoder(w).Encode(convertReview) + if err != nil { + log.Error(err, "failed to write response") + return + } +} + +// handles a version conversion request. 
+func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { + if req == nil { + return nil, fmt.Errorf("conversion request is nil") + } + var objects []runtime.RawExtension + + for _, obj := range req.Objects { + src, gvk, err := wh.decoder.Decode(obj.Raw) + if err != nil { + return nil, err + } + dst, err := wh.allocateDstObject(req.DesiredAPIVersion, gvk.Kind) + if err != nil { + return nil, err + } + err = wh.convertObject(src, dst) + if err != nil { + return nil, err + } + objects = append(objects, runtime.RawExtension{Object: dst}) + } + return &apix.ConversionResponse{ + UID: req.UID, + ConvertedObjects: objects, + Result: metav1.Status{ + Status: metav1.StatusSuccess, + }, + }, nil +} + +// convertObject will convert given a src object to dst object. +// Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 +// without compromising readability, so disabling gocyclo linter +func (wh *Webhook) convertObject(src, dst runtime.Object) error { + srcGVK := src.GetObjectKind().GroupVersionKind() + dstGVK := dst.GetObjectKind().GroupVersionKind() + + if srcGVK.GroupKind() != dstGVK.GroupKind() { + return fmt.Errorf("src %T and dst %T does not belong to same API Group", src, dst) + } + + if srcGVK == dstGVK { + return fmt.Errorf("conversion is not allowed between same type %T", src) + } + + srcIsHub, dstIsHub := isHub(src), isHub(dst) + srcIsConvertible, dstIsConvertible := isConvertible(src), isConvertible(dst) + + switch { + case srcIsHub && dstIsConvertible: + return dst.(conversion.Convertible).ConvertFrom(src.(conversion.Hub)) + case dstIsHub && srcIsConvertible: + return src.(conversion.Convertible).ConvertTo(dst.(conversion.Hub)) + case srcIsConvertible && dstIsConvertible: + return wh.convertViaHub(src.(conversion.Convertible), dst.(conversion.Convertible)) + default: + return fmt.Errorf("%T is not convertible to %T", src, dst) + } +} + +func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { + hub, err := wh.getHub(src) + if err != nil { + return err + } + + if hub == nil { + return fmt.Errorf("%s does not have any Hub defined", src) + } + + err = src.ConvertTo(hub) + if err != nil { + return fmt.Errorf("%T failed to convert to hub version %T : %w", src, hub, err) + } + + err = dst.ConvertFrom(hub) + if err != nil { + return fmt.Errorf("%T failed to convert from hub version %T : %w", dst, hub, err) + } + + return nil +} + +// getHub returns an instance of the Hub for passed-in object's group/kind. +func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { + gvks, err := objectGVKs(wh.scheme, obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + var hub conversion.Hub + var hubFoundAlready bool + for _, gvk := range gvks { + instance, err := wh.scheme.New(gvk) + if err != nil { + return nil, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + if val, isHub := instance.(conversion.Hub); isHub { + if hubFoundAlready { + return nil, fmt.Errorf("multiple hub version defined for %T", obj) + } + hubFoundAlready = true + hub = val + } + } + return hub, nil +} + +// allocateDstObject returns an instance for a given GVK. 
+func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { + gvk := schema.FromAPIVersionAndKind(apiVersion, kind) + + obj, err := wh.scheme.New(gvk) + if err != nil { + return obj, err + } + + t, err := meta.TypeAccessor(obj) + if err != nil { + return obj, err + } + + t.SetAPIVersion(apiVersion) + t.SetKind(kind) + + return obj, nil +} + +// IsConvertible determines if given type is convertible or not. For a type +// to be convertible, the group-kind needs to have a Hub type defined and all +// non-hub types must be able to convert to/from Hub. +func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { + var hubs, spokes, nonSpokes []runtime.Object + + gvks, err := objectGVKs(scheme, obj) + if err != nil { + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + for _, gvk := range gvks { + instance, err := scheme.New(gvk) + if err != nil { + return false, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + + if isHub(instance) { + hubs = append(hubs, instance) + continue + } + + if !isConvertible(instance) { + nonSpokes = append(nonSpokes, instance) + continue + } + + spokes = append(spokes, instance) + } + + if len(gvks) == 1 { + return false, nil // single version + } + + if len(hubs) == 0 && len(spokes) == 0 { + // multiple version detected with no conversion implementation. This is + // true for multi-version built-in types. + return false, nil + } + + if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible + return true, nil + } + + return false, PartialImplementationError{ + hubs: hubs, + nonSpokes: nonSpokes, + spokes: spokes, + } +} + +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. +func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. + objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + +// PartialImplementationError represents an error due to partial conversion +// implementation such as hub without spokes, multiple hubs or spokes without hub. +type PartialImplementationError struct { + gvk schema.GroupVersionKind + hubs []runtime.Object + nonSpokes []runtime.Object + spokes []runtime.Object +} + +func (e PartialImplementationError) Error() string { + if len(e.hubs) == 0 { + return fmt.Sprintf("no hub defined for gvk %s", e.gvk) + } + if len(e.hubs) > 1 { + return fmt.Sprintf("multiple(%d) hubs defined for group-kind '%s' ", + len(e.hubs), e.gvk.GroupKind()) + } + if len(e.nonSpokes) > 0 { + return fmt.Sprintf("%d inconvertible types detected for group-kind '%s'", + len(e.nonSpokes), e.gvk.GroupKind()) + } + return "" +} + +// isHub determines if passed-in object is a Hub or not. +func isHub(obj runtime.Object) bool { + _, yes := obj.(conversion.Hub) + return yes +} + +// isConvertible determines if passed-in object is a convertible. 
+func isConvertible(obj runtime.Object) bool { + _, yes := obj.(conversion.Convertible) + return yes +} + +// helper to construct error response. +func errored(err error) *apix.ConversionResponse { + return &apix.ConversionResponse{ + Result: metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + }, + } +} diff --git a/pkg/webhook/conversion/conversion_suite_test.go b/pkg/webhook/conversion/conversion_suite_test.go new file mode 100644 index 0000000000..bb3798747c --- /dev/null +++ b/pkg/webhook/conversion/conversion_suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package conversion + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestConversionWebhook(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "CRD conversion Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/webhook/conversion/conversion_test.go b/pkg/webhook/conversion/conversion_test.go new file mode 100644 index 0000000000..2dd5e2ae0e --- /dev/null +++ b/pkg/webhook/conversion/conversion_test.go @@ -0,0 +1,362 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + appsv1beta1 "k8s.io/api/apps/v1beta1" + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kscheme "k8s.io/client-go/kubernetes/scheme" + + jobsv1 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v1" + jobsv2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" + jobsv3 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v3" +) + +var _ = Describe("Conversion Webhook", func() { + + var respRecorder *httptest.ResponseRecorder + var decoder *Decoder + var scheme *runtime.Scheme + webhook := Webhook{} + + BeforeEach(func() { + respRecorder = &httptest.ResponseRecorder{ + Body: bytes.NewBuffer(nil), + } + + scheme = runtime.NewScheme() + Expect(kscheme.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv1.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv2.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv3.AddToScheme(scheme)).To(Succeed()) + Expect(webhook.InjectScheme(scheme)).To(Succeed()) + + var err error + decoder, err = NewDecoder(scheme) + Expect(err).NotTo(HaveOccurred()) + + }) + + doRequest := func(convReq *apix.ConversionReview) *apix.ConversionReview { + var payload bytes.Buffer + + Expect(json.NewEncoder(&payload).Encode(convReq)).Should(Succeed()) + + convReview := &apix.ConversionReview{} + req := &http.Request{ + Body: io.NopCloser(bytes.NewReader(payload.Bytes())), + } + webhook.ServeHTTP(respRecorder, req) + Expect(json.NewDecoder(respRecorder.Result().Body).Decode(convReview)).To(Succeed()) + return convReview + } + + makeV1Obj := func() *jobsv1.ExternalJob { + return &jobsv1.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv1.ExternalJobSpec{ + RunAt: "every 2 seconds", + }, + } + } + + makeV2Obj := func() *jobsv2.ExternalJob { + return &jobsv2.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v2", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv2.ExternalJobSpec{ + ScheduleAt: "every 2 seconds", + }, + } + } + + It("should convert spoke to hub successfully", func() { + + v1Obj := makeV1Obj() + + expected := &jobsv2.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v2", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv2.ExternalJobSpec{ + ScheduleAt: "every 2 seconds", + }, + } + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v2", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + + Expect(convReview.Response.ConvertedObjects).To(HaveLen(1)) + Expect(convReview.Response.Result.Status).To(Equal(metav1.StatusSuccess)) + got, _, err := decoder.Decode(convReview.Response.ConvertedObjects[0].Raw) + Expect(err).NotTo(HaveOccurred()) + Expect(got).To(Equal(expected)) + }) + + It("should convert hub to spoke successfully", func() { + + v2Obj := makeV2Obj() + + expected := &jobsv1.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: 
"default", + Name: "obj-1", + }, + Spec: jobsv1.ExternalJobSpec{ + RunAt: "every 2 seconds", + }, + } + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v1", + Objects: []runtime.RawExtension{ + { + Object: v2Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + + Expect(convReview.Response.ConvertedObjects).To(HaveLen(1)) + Expect(convReview.Response.Result.Status).To(Equal(metav1.StatusSuccess)) + got, _, err := decoder.Decode(convReview.Response.ConvertedObjects[0].Raw) + Expect(err).NotTo(HaveOccurred()) + Expect(got).To(Equal(expected)) + }) + + It("should convert spoke to spoke successfully", func() { + + v1Obj := makeV1Obj() + + expected := &jobsv3.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v3", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv3.ExternalJobSpec{ + DeferredAt: "every 2 seconds", + }, + } + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v3", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + + Expect(convReview.Response.ConvertedObjects).To(HaveLen(1)) + Expect(convReview.Response.Result.Status).To(Equal(metav1.StatusSuccess)) + got, _, err := decoder.Decode(convReview.Response.ConvertedObjects[0].Raw) + Expect(err).NotTo(HaveOccurred()) + Expect(got).To(Equal(expected)) + }) + + It("should return error when dest/src objects belong to different API groups", func() { + v1Obj := makeV1Obj() + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + // request conversion for different group + DesiredAPIVersion: "jobss.example.org/v2", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + Expect(convReview.Response.Result.Status).To(Equal("Failure")) + Expect(convReview.Response.ConvertedObjects).To(BeEmpty()) + }) + + It("should return error when dest/src objects are of same type", func() { + + v1Obj := makeV1Obj() + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v1", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + Expect(convReview.Response.Result.Status).To(Equal("Failure")) + Expect(convReview.Response.ConvertedObjects).To(BeEmpty()) + }) + + It("should return error when the API group does not have a hub defined", func() { + + v1Obj := &appsv1beta1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + } + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "apps/v1", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + Expect(convReview.Response.Result.Status).To(Equal("Failure")) + Expect(convReview.Response.ConvertedObjects).To(BeEmpty()) + }) + +}) + +var _ = Describe("IsConvertible", func() { + + var scheme *runtime.Scheme + + BeforeEach(func() { + scheme = runtime.NewScheme() + + Expect(kscheme.AddToScheme(scheme)).To(Succeed()) + 
Expect(jobsv1.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv2.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv3.AddToScheme(scheme)).To(Succeed()) + }) + + It("should not error for uninitialized types", func() { + obj := &jobsv2.ExternalJob{} + + ok, err := IsConvertible(scheme, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(ok).To(BeTrue()) + }) + + It("should not error for unstructured types", func() { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "ExternalJob", + "apiVersion": "jobs.testprojects.kb.io/v2", + }, + } + + ok, err := IsConvertible(scheme, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(ok).To(BeTrue()) + }) + + It("should return true for convertible types", func() { + obj := &jobsv2.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v2", + }, + } + + ok, err := IsConvertible(scheme, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(ok).To(BeTrue()) + }) + + It("should return false for a non convertible type", func() { + obj := &appsv1beta1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1beta1", + }, + } + + ok, err := IsConvertible(scheme, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(ok).ToNot(BeTrue()) + }) +}) diff --git a/pkg/webhook/conversion/decoder.go b/pkg/webhook/conversion/decoder.go new file mode 100644 index 0000000000..6a9e9c2365 --- /dev/null +++ b/pkg/webhook/conversion/decoder.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// Decoder knows how to decode the contents of a CRD version conversion +// request into a concrete object. +// TODO(droot): consider reusing decoder from admission pkg for this. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object. +func (d *Decoder) Decode(content []byte) (runtime.Object, *schema.GroupVersionKind, error) { + deserializer := d.codecs.UniversalDeserializer() + return deserializer.Decode(content, nil, nil) +} + +// DecodeInto decodes the inlined object in the into the passed-in runtime.Object. 
+func (d *Decoder) DecodeInto(content []byte, into runtime.Object) error { + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, content, into) +} diff --git a/pkg/webhook/conversion/testdata/.gitignore b/pkg/webhook/conversion/testdata/.gitignore new file mode 100644 index 0000000000..d97ffc5159 --- /dev/null +++ b/pkg/webhook/conversion/testdata/.gitignore @@ -0,0 +1,24 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/pkg/webhook/conversion/testdata/Makefile b/pkg/webhook/conversion/testdata/Makefile new file mode 100644 index 0000000000..2d9d3dda15 --- /dev/null +++ b/pkg/webhook/conversion/testdata/Makefile @@ -0,0 +1,64 @@ + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:trivialVersions=true" + +all: manager + +# Run tests +test: generate fmt vet manifests + go test ./api/... ./controllers/... -coverprofile cover.out + +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go + +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet + go run ./main.go + +# Install CRDs into a cluster +install: manifests + kubectl apply -f config/crd/bases + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests + kubectl apply -f config/crd/bases + kustomize build config/default | kubectl apply -f - + +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... + +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/... + +# Build the docker image +docker-build: test + docker build . 
-t ${IMG} + @echo "updating kustomize image patch file for manager resource" + sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml + +# Push the docker image +docker-push: + docker push ${IMG} + +# find or download controller-gen +# download controller-gen if necessary +controller-gen: +ifeq (, $(shell which controller-gen)) + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0-beta.2 +CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen +else +CONTROLLER_GEN=$(shell which controller-gen) +endif diff --git a/pkg/webhook/conversion/testdata/PROJECT b/pkg/webhook/conversion/testdata/PROJECT new file mode 100644 index 0000000000..6b168dcbc1 --- /dev/null +++ b/pkg/webhook/conversion/testdata/PROJECT @@ -0,0 +1,13 @@ +version: "2" +domain: testprojects.kb.io +repo: sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata +resources: +- group: jobs + version: v1 + kind: ExternalJob +- group: jobs + version: v2 + kind: ExternalJob +- group: jobs + version: v3 + kind: ExternalJob diff --git a/pkg/webhook/conversion/testdata/api/v1/externaljob_types.go b/pkg/webhook/conversion/testdata/api/v1/externaljob_types.go new file mode 100644 index 0000000000..bf99e2a204 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v1/externaljob_types.go @@ -0,0 +1,92 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + v2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// ExternalJobSpec defines the desired state of ExternalJob +type ExternalJobSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + RunAt string `json:"runAt"` +} + +// ExternalJobStatus defines the observed state of ExternalJob +type ExternalJobStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// ExternalJob is the Schema for the externaljobs API +type ExternalJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalJobSpec `json:"spec,omitempty"` + Status ExternalJobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExternalJobList contains a list of ExternalJob +type ExternalJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalJob `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalJob{}, &ExternalJobList{}) +} + +// ConvertTo implements conversion logic to convert to Hub type (v2.ExternalJob +// in this case) +func (ej *ExternalJob) ConvertTo(dst conversion.Hub) error { + switch t := dst.(type) { + case *v2.ExternalJob: + jobv2 := dst.(*v2.ExternalJob) + jobv2.ObjectMeta = ej.ObjectMeta + jobv2.Spec.ScheduleAt = ej.Spec.RunAt + return nil + default: + return fmt.Errorf("unsupported type %v", t) + } +} + +// ConvertFrom implements conversion logic to convert from Hub type (v2.ExternalJob +// in this case) +func (ej *ExternalJob) ConvertFrom(src conversion.Hub) error { + switch t := src.(type) { + case *v2.ExternalJob: + jobv2 := src.(*v2.ExternalJob) + ej.ObjectMeta = jobv2.ObjectMeta + ej.Spec.RunAt = jobv2.Spec.ScheduleAt + return nil + default: + return fmt.Errorf("unsupported type %v", t) + } +} diff --git a/pkg/webhook/conversion/testdata/api/v1/groupversion_info.go b/pkg/webhook/conversion/testdata/api/v1/groupversion_info.go new file mode 100644 index 0000000000..5bbef61786 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v1/groupversion_info.go @@ -0,0 +1,35 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the jobs v1 API group +// +kubebuilder:object:generate=true +// +groupName=jobs.testprojects.kb.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jobs.testprojects.kb.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/webhook/conversion/testdata/api/v1/zz_generated.deepcopy.go b/pkg/webhook/conversion/testdata/api/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..7208ba8c69 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,113 @@ +// +build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// autogenerated by controller-gen object, do not modify manually + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJob) DeepCopyInto(out *ExternalJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJob. +func (in *ExternalJob) DeepCopy() *ExternalJob { + if in == nil { + return nil + } + out := new(ExternalJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobList. +func (in *ExternalJobList) DeepCopy() *ExternalJobList { + if in == nil { + return nil + } + out := new(ExternalJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobSpec) DeepCopyInto(out *ExternalJobSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobSpec. +func (in *ExternalJobSpec) DeepCopy() *ExternalJobSpec { + if in == nil { + return nil + } + out := new(ExternalJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalJobStatus) DeepCopyInto(out *ExternalJobStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobStatus. +func (in *ExternalJobStatus) DeepCopy() *ExternalJobStatus { + if in == nil { + return nil + } + out := new(ExternalJobStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/webhook/conversion/testdata/api/v2/externaljob_types.go b/pkg/webhook/conversion/testdata/api/v2/externaljob_types.go new file mode 100644 index 0000000000..de5a03a212 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v2/externaljob_types.go @@ -0,0 +1,68 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ExternalJobSpec defines the desired state of ExternalJob +type ExternalJobSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + ScheduleAt string `json:"scheduleAt"` +} + +// ExternalJobStatus defines the observed state of ExternalJob +type ExternalJobStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// ExternalJob is the Schema for the externaljobs API +type ExternalJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalJobSpec `json:"spec,omitempty"` + Status ExternalJobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExternalJobList contains a list of ExternalJob +type ExternalJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalJob `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalJob{}, &ExternalJobList{}) +} + +// Hub is just a marker method to indicate that v2.ExternalJob is the Hub type +// in this case. +// v2.ExternalJob is the storage version so mark this as Hub. +// Storage version doesn't need to implement any conversion methods because +// default conversionHandler implements conversion logic for storage version. +// TODO(droot): Add comment annotation here to mark it as storage version +func (ej *ExternalJob) Hub() {} diff --git a/pkg/webhook/conversion/testdata/api/v2/groupversion_info.go b/pkg/webhook/conversion/testdata/api/v2/groupversion_info.go new file mode 100644 index 0000000000..5019111a00 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v2/groupversion_info.go @@ -0,0 +1,35 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v2 contains API Schema definitions for the jobs v2 API group +// +kubebuilder:object:generate=true +// +groupName=jobs.testprojects.kb.io +package v2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jobs.testprojects.kb.io", Version: "v2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/webhook/conversion/testdata/api/v2/zz_generated.deepcopy.go b/pkg/webhook/conversion/testdata/api/v2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..53c9f758b1 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v2/zz_generated.deepcopy.go @@ -0,0 +1,113 @@ +// +build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// autogenerated by controller-gen object, do not modify manually + +package v2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJob) DeepCopyInto(out *ExternalJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJob. +func (in *ExternalJob) DeepCopy() *ExternalJob { + if in == nil { + return nil + } + out := new(ExternalJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobList. 
+func (in *ExternalJobList) DeepCopy() *ExternalJobList { + if in == nil { + return nil + } + out := new(ExternalJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobSpec) DeepCopyInto(out *ExternalJobSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobSpec. +func (in *ExternalJobSpec) DeepCopy() *ExternalJobSpec { + if in == nil { + return nil + } + out := new(ExternalJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobStatus) DeepCopyInto(out *ExternalJobStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobStatus. +func (in *ExternalJobStatus) DeepCopy() *ExternalJobStatus { + if in == nil { + return nil + } + out := new(ExternalJobStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/webhook/conversion/testdata/api/v3/externaljob_types.go b/pkg/webhook/conversion/testdata/api/v3/externaljob_types.go new file mode 100644 index 0000000000..15c438f68a --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v3/externaljob_types.go @@ -0,0 +1,92 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3 + +import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + v2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// ExternalJobSpec defines the desired state of ExternalJob +type ExternalJobSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + DeferredAt string `json:"deferredAt"` +} + +// ExternalJobStatus defines the observed state of ExternalJob +type ExternalJobStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// ExternalJob is the Schema for the externaljobs API +type ExternalJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalJobSpec `json:"spec,omitempty"` + Status ExternalJobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExternalJobList contains a list of ExternalJob +type ExternalJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalJob `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalJob{}, &ExternalJobList{}) +} + +// ConvertTo implements conversion logic to convert to Hub type (v2.ExternalJob +// in this case) +func (ej *ExternalJob) ConvertTo(dst conversion.Hub) error { + switch t := dst.(type) { + case *v2.ExternalJob: + jobv2 := dst.(*v2.ExternalJob) + jobv2.ObjectMeta = ej.ObjectMeta + jobv2.Spec.ScheduleAt = ej.Spec.DeferredAt + return nil + default: + return fmt.Errorf("unsupported type %v", t) + } +} + +// ConvertFrom implements conversion logic to convert from Hub type (v2.ExternalJob +// in this case) +func (ej *ExternalJob) ConvertFrom(src conversion.Hub) error { + switch t := src.(type) { + case *v2.ExternalJob: + jobv2 := src.(*v2.ExternalJob) + ej.ObjectMeta = jobv2.ObjectMeta + ej.Spec.DeferredAt = jobv2.Spec.ScheduleAt + return nil + default: + return fmt.Errorf("unsupported type %v", t) + } +} diff --git a/pkg/webhook/conversion/testdata/api/v3/groupversion_info.go b/pkg/webhook/conversion/testdata/api/v3/groupversion_info.go new file mode 100644 index 0000000000..1ae8269614 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v3/groupversion_info.go @@ -0,0 +1,35 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v3 contains API Schema definitions for the jobs v3 API group +// +kubebuilder:object:generate=true +// +groupName=jobs.testprojects.kb.io +package v3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jobs.testprojects.kb.io", Version: "v3"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/webhook/conversion/testdata/api/v3/zz_generated.deepcopy.go b/pkg/webhook/conversion/testdata/api/v3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a90942b427 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v3/zz_generated.deepcopy.go @@ -0,0 +1,113 @@ +// +build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// autogenerated by controller-gen object, do not modify manually + +package v3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJob) DeepCopyInto(out *ExternalJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJob. +func (in *ExternalJob) DeepCopy() *ExternalJob { + if in == nil { + return nil + } + out := new(ExternalJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobList. +func (in *ExternalJobList) DeepCopy() *ExternalJobList { + if in == nil { + return nil + } + out := new(ExternalJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobSpec) DeepCopyInto(out *ExternalJobSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobSpec. +func (in *ExternalJobSpec) DeepCopy() *ExternalJobSpec { + if in == nil { + return nil + } + out := new(ExternalJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalJobStatus) DeepCopyInto(out *ExternalJobStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobStatus.
+func (in *ExternalJobStatus) DeepCopy() *ExternalJobStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalJobStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/pkg/webhook/conversion/testdata/hack/boilerplate.go.txt b/pkg/webhook/conversion/testdata/hack/boilerplate.go.txt
new file mode 100644
index 0000000000..b92001fb4e
--- /dev/null
+++ b/pkg/webhook/conversion/testdata/hack/boilerplate.go.txt
@@ -0,0 +1,14 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
\ No newline at end of file
diff --git a/pkg/webhook/conversion/testdata/main.go b/pkg/webhook/conversion/testdata/main.go
new file mode 100644
index 0000000000..a3922da009
--- /dev/null
+++ b/pkg/webhook/conversion/testdata/main.go
@@ -0,0 +1,74 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"flag"
+	"os"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	jobsv1 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v1"
+	jobsv2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2"
+	jobsv3 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v3"
+	// +kubebuilder:scaffold:imports
+)
+
+var (
+	scheme   = runtime.NewScheme()
+	setupLog = ctrl.Log.WithName("setup")
+)
+
+func init() {
+
+	jobsv1.AddToScheme(scheme)
+	jobsv2.AddToScheme(scheme)
+	jobsv3.AddToScheme(scheme)
+	// +kubebuilder:scaffold:scheme
+}
+
+func main() {
+	var metricsAddr string
+	var enableLeaderElection bool
+	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
+	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
+		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
+	flag.Parse()
+
+	ctrl.SetLogger(zap.Logger(true))
+
+	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
+		Scheme:             scheme,
+		MetricsBindAddress: metricsAddr,
+		LeaderElection:     enableLeaderElection,
+	})
+	if err != nil {
+		setupLog.Error(err, "unable to start manager")
+		os.Exit(1)
+	}
+
+	// +kubebuilder:scaffold:builder
+
+	setupLog.Info("starting manager")
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}
diff --git a/pkg/webhook/doc.go b/pkg/webhook/doc.go
new file mode 100644
index 0000000000..2c93f0d995
--- /dev/null
+++ b/pkg/webhook/doc.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package webhook provides methods to build and bootstrap a webhook server.
+
+Currently, it only supports admission webhooks. It will support CRD conversion webhooks in the near future.
+*/
+package webhook
+
+import (
+	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+)
+
+var log = logf.RuntimeLog.WithName("webhook")
diff --git a/pkg/webhook/example_test.go b/pkg/webhook/example_test.go
new file mode 100644
index 0000000000..e7872ae5da
--- /dev/null
+++ b/pkg/webhook/example_test.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook_test
+
+import (
+	"context"
+	"net/http"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	ctrl "sigs.k8s.io/controller-runtime"
+	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
+	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
+	. "sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+var (
+	// Build webhooks used for the various server
+	// configuration options.
+	//
+	// These handlers could also be implementations
+	// of the AdmissionHandler interface for more complex
+	// use cases.
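+	//
+	// admission.HandlerFunc adapts a plain function into an admission
+	// Handler, in the same spirit as http.HandlerFunc.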
+	mutatingHook = &Admission{
+		Handler: admission.HandlerFunc(func(ctx context.Context, req AdmissionRequest) AdmissionResponse {
+			return Patched("some changes",
+				JSONPatchOp{Operation: "add", Path: "/metadata/annotations/access", Value: "granted"},
+				JSONPatchOp{Operation: "add", Path: "/metadata/annotations/reason", Value: "not so secret"},
+			)
+		}),
+	}
+
+	validatingHook = &Admission{
+		Handler: admission.HandlerFunc(func(ctx context.Context, req AdmissionRequest) AdmissionResponse {
+			return Denied("none shall pass!")
+		}),
+	}
+)
+
+// This example registers webhooks with a webhook server
+// that is run by a controller manager.
+func Example() {
+	// Create a manager
+	// Note: GetConfigOrDie will os.Exit(1) w/o any message if no kube-config can be found
+	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
+	if err != nil {
+		panic(err)
+	}
+
+	// Create a webhook server.
+	hookServer := &Server{
+		Port: 8443,
+	}
+	if err := mgr.Add(hookServer); err != nil {
+		panic(err)
+	}
+
+	// Register the webhooks in the server.
+	hookServer.Register("/mutating", mutatingHook)
+	hookServer.Register("/validating", validatingHook)
+
+	// Start the server by starting a previously-set-up manager
+	err = mgr.Start(ctrl.SetupSignalHandler())
+	if err != nil {
+		// handle error
+		panic(err)
+	}
+}
+
+// This example creates a webhook server that can be
+// run without a controller manager.
+//
+// Note that this assumes and requires a valid TLS
+// cert and key at the default locations
+// tls.crt and tls.key.
+func ExampleServer_StartStandalone() {
+	// Create a webhook server
+	hookServer := &Server{
+		Port: 8443,
+	}
+
+	// Register the webhooks in the server.
+	hookServer.Register("/mutating", mutatingHook)
+	hookServer.Register("/validating", validatingHook)
+
+	// Start the server without a manager
+	err := hookServer.StartStandalone(signals.SetupSignalHandler(), scheme.Scheme)
+	if err != nil {
+		// handle error
+		panic(err)
+	}
+}
+
+// This example creates a standalone webhook handler
+// and runs it on a vanilla Go HTTP server to demonstrate
+// how you could run a webhook on an existing server
+// without a controller manager.
+func ExampleStandaloneWebhook() {
+	// Assume you have an existing HTTP server at your disposal
+	// configured as desired (e.g. with TLS).
+	// For this example just create a basic http.ServeMux
+	mux := http.NewServeMux()
+	port := ":8000"
+
+	// Create the standalone HTTP handlers from our webhooks
+	mutatingHookHandler, err := admission.StandaloneWebhook(mutatingHook, admission.StandaloneOptions{
+		Scheme: scheme.Scheme,
+		// Logger lets you optionally pass
+		// a custom logger (defaults to the log.Log global Logger)
+		Logger: logf.RuntimeLog.WithName("mutating-webhook"),
+		// MetricsPath lets you optionally
+		// provide the path it will be served on,
+		// to be used for labelling prometheus metrics.
+		// If none is set, prometheus metrics will not be generated.
+ MetricsPath: "/mutating", + }) + if err != nil { + // handle error + panic(err) + } + + validatingHookHandler, err := admission.StandaloneWebhook(validatingHook, admission.StandaloneOptions{ + Scheme: scheme.Scheme, + Logger: logf.RuntimeLog.WithName("validating-webhook"), + MetricsPath: "/validating", + }) + if err != nil { + // handle error + panic(err) + } + + // Register the webhook handlers to your server + mux.Handle("/mutating", mutatingHookHandler) + mux.Handle("/validating", validatingHookHandler) + + // Run your handler + if err := http.ListenAndServe(port, mux); err != nil { //nolint:gosec // it's fine to not set timeouts here + panic(err) + } +} diff --git a/pkg/webhook/internal/metrics/metrics.go b/pkg/webhook/internal/metrics/metrics.go new file mode 100644 index 0000000000..557004908b --- /dev/null +++ b/pkg/webhook/internal/metrics/metrics.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // RequestLatency is a prometheus metric which is a histogram of the latency + // of processing admission requests. + RequestLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "controller_runtime_webhook_latency_seconds", + Help: "Histogram of the latency of processing admission requests", + }, + []string{"webhook"}, + ) + + // RequestTotal is a prometheus metric which is a counter of the total processed admission requests. + RequestTotal = func() *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "controller_runtime_webhook_requests_total", + Help: "Total number of admission requests by HTTP status code.", + }, + []string{"webhook", "code"}, + ) + }() + + // RequestInFlight is a prometheus metric which is a gauge of the in-flight admission requests. + RequestInFlight = func() *prometheus.GaugeVec { + return prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "controller_runtime_webhook_requests_in_flight", + Help: "Current number of admission requests being served.", + }, + []string{"webhook"}, + ) + }() +) + +func init() { + metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight) +} + +// InstrumentedHook adds some instrumentation on top of the given webhook. +func InstrumentedHook(path string, hookRaw http.Handler) http.Handler { + lbl := prometheus.Labels{"webhook": path} + + lat := RequestLatency.MustCurryWith(lbl) + cnt := RequestTotal.MustCurryWith(lbl) + gge := RequestInFlight.With(lbl) + + // Initialize the most likely HTTP status codes. 
+	cnt.WithLabelValues("200")
+	cnt.WithLabelValues("500")
+
+	return promhttp.InstrumentHandlerDuration(
+		lat,
+		promhttp.InstrumentHandlerCounter(
+			cnt,
+			promhttp.InstrumentHandlerInFlight(gge, hookRaw),
+		),
+	)
+}
diff --git a/pkg/webhook/server.go b/pkg/webhook/server.go
new file mode 100644
index 0000000000..06f479208a
--- /dev/null
+++ b/pkg/webhook/server.go
@@ -0,0 +1,345 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	kscheme "k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
+	"sigs.k8s.io/controller-runtime/pkg/internal/httpserver"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics"
+)
+
+// DefaultPort is the default port that the webhook server serves.
+var DefaultPort = 9443
+
+// Server is an admission webhook server that can serve traffic and
+// generate related k8s resources for deploying.
+//
+// TLS is required for a webhook to be accessed by Kubernetes, so
+// you must provide a CertName and KeyName or have valid cert/key
+// at the default locations (tls.crt and tls.key). If you do not
+// want to configure TLS (e.g. for testing purposes) run an
+// admission.StandaloneWebhook in your own server.
+type Server struct {
+	// Host is the address that the server will listen on.
+	// Defaults to "" - all addresses.
+	Host string
+
+	// Port is the port number that the server will serve.
+	// It will be defaulted to 9443 if unspecified.
+	Port int
+
+	// CertDir is the directory that contains the server key and certificate.
+	CertDir string
+
+	// CertName is the server certificate name. Defaults to tls.crt.
+	CertName string
+
+	// KeyName is the server key name. Defaults to tls.key.
+	KeyName string
+
+	// ClientCAName is the CA certificate name which the server uses to verify the remote (client) certificate.
+	// Defaults to "", which means the server does not verify the client's certificate.
+	ClientCAName string
+
+	// TLSMinVersion is the minimum version of TLS supported. Accepts
+	// "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility)
+	TLSMinVersion string
+
+	// TLSOpts is used to allow configuring the TLS config used for the server
+	TLSOpts []func(*tls.Config)
+
+	// WebhookMux is the multiplexer that handles different webhooks.
+	WebhookMux *http.ServeMux
+
+	// webhooks keeps track of all registered webhooks, for dependency injection
+	// and to provide better panic messages on duplicate webhook registration.
+	webhooks map[string]http.Handler
+
+	// setFields allows injecting dependencies from an external source
+	setFields inject.Func
+
+	// defaultingOnce ensures that the default fields are only ever set once.
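+	// Both Register and Start trigger the defaulting, so whichever runs first applies it.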
+ defaultingOnce sync.Once + + // started is set to true immediately before the server is started + // and thus can be used to check if the server has been started + started bool + + // mu protects access to the webhook map & setFields for Start, Register, etc + mu sync.Mutex +} + +// setDefaults does defaulting for the Server. +func (s *Server) setDefaults() { + s.webhooks = map[string]http.Handler{} + if s.WebhookMux == nil { + s.WebhookMux = http.NewServeMux() + } + + if s.Port <= 0 { + s.Port = DefaultPort + } + + if len(s.CertDir) == 0 { + s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + } + + if len(s.CertName) == 0 { + s.CertName = "tls.crt" + } + + if len(s.KeyName) == 0 { + s.KeyName = "tls.key" + } +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates +// the webhook server doesn't need leader election. +func (*Server) NeedLeaderElection() bool { + return false +} + +// Register marks the given webhook as being served at the given path. +// It panics if two hooks are registered on the same path. +func (s *Server) Register(path string, hook http.Handler) { + s.mu.Lock() + defer s.mu.Unlock() + + s.defaultingOnce.Do(s.setDefaults) + if _, found := s.webhooks[path]; found { + panic(fmt.Errorf("can't register duplicate path: %v", path)) + } + // TODO(directxman12): call setfields if we've already started the server + s.webhooks[path] = hook + s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) + + regLog := log.WithValues("path", path) + regLog.Info("Registering webhook") + + // we've already been "started", inject dependencies here. + // Otherwise, InjectFunc will do this for us later. + if s.setFields != nil { + if err := s.setFields(hook); err != nil { + // TODO(directxman12): swallowing this error isn't great, but we'd have to + // change the signature to fix that + regLog.Error(err, "unable to inject fields into webhook during registration") + } + + baseHookLog := log.WithName("webhooks") + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { + regLog.Error(err, "unable to logger into webhook during registration") + } + } +} + +// StartStandalone runs a webhook server without +// a controller manager. +func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { + // Use the Kubernetes client-go scheme if none is specified + if scheme == nil { + scheme = kscheme.Scheme + } + + if err := s.InjectFunc(func(i interface{}) error { + if _, err := inject.SchemeInto(scheme, i); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + return s.Start(ctx) +} + +// tlsVersion converts from human-readable TLS version (for example "1.1") +// to the values accepted by tls.Config (for example 0x301). 
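+// An empty string is mapped to TLS 1.0 to preserve the previous default behaviour.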
+func tlsVersion(version string) (uint16, error) { + switch version { + // default is previous behaviour + case "": + return tls.VersionTLS10, nil + case "1.0": + return tls.VersionTLS10, nil + case "1.1": + return tls.VersionTLS11, nil + case "1.2": + return tls.VersionTLS12, nil + case "1.3": + return tls.VersionTLS13, nil + default: + return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version) + } +} + +// Start runs the server. +// It will install the webhook related resources depend on the server configuration. +func (s *Server) Start(ctx context.Context) error { + s.defaultingOnce.Do(s.setDefaults) + + baseHookLog := log.WithName("webhooks") + baseHookLog.Info("Starting webhook server") + + certPath := filepath.Join(s.CertDir, s.CertName) + keyPath := filepath.Join(s.CertDir, s.KeyName) + + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() + + tlsMinVersion, err := tlsVersion(s.TLSMinVersion) + if err != nil { + return err + } + + cfg := &tls.Config{ //nolint:gosec + NextProtos: []string{"h2"}, + GetCertificate: certWatcher.GetCertificate, + MinVersion: tlsMinVersion, + } + + // load CA to verify client certificate + if s.ClientCAName != "" { + certPool := x509.NewCertPool() + clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName)) + if err != nil { + return fmt.Errorf("failed to read client CA cert: %w", err) + } + + ok := certPool.AppendCertsFromPEM(clientCABytes) + if !ok { + return fmt.Errorf("failed to append client CA cert to CA pool") + } + + cfg.ClientCAs = certPool + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.TLSOpts { + op(cfg) + } + + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg) + if err != nil { + return err + } + + log.Info("Serving webhook server", "host", s.Host, "port", s.Port) + + srv := httpserver.New(s.WebhookMux) + + idleConnsClosed := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down webhook server") + + // TODO: use a context with reasonable timeout + if err := srv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout + log.Error(err, "error shutting down the HTTP server") + } + close(idleConnsClosed) + }() + + s.mu.Lock() + s.started = true + s.mu.Unlock() + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { + return err + } + + <-idleConnsClosed + return nil +} + +// StartedChecker returns an healthz.Checker which is healthy after the +// server has been started. +func (s *Server) StartedChecker() healthz.Checker { + config := &tls.Config{ + InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own webhook port. 
+ } + return func(req *http.Request) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started { + return fmt.Errorf("webhook server has not been started yet") + } + + d := &net.Dialer{Timeout: 10 * time.Second} + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config) + if err != nil { + return fmt.Errorf("webhook server is not reachable: %w", err) + } + + if err := conn.Close(); err != nil { + return fmt.Errorf("webhook server is not reachable: closing connection: %w", err) + } + + return nil + } +} + +// InjectFunc injects the field setter into the server. +func (s *Server) InjectFunc(f inject.Func) error { + s.setFields = f + + // inject fields here that weren't injected in Register because we didn't have setFields yet. + baseHookLog := log.WithName("webhooks") + for hookPath, webhook := range s.webhooks { + if err := s.setFields(webhook); err != nil { + return err + } + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { + return err + } + } + return nil +} diff --git a/pkg/webhook/server_test.go b/pkg/webhook/server_test.go new file mode 100644 index 0000000000..5e77564194 --- /dev/null +++ b/pkg/webhook/server_test.go @@ -0,0 +1,265 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var _ = Describe("Webhook Server", func() { + var ( + ctx context.Context + ctxCancel context.CancelFunc + testHostPort string + client *http.Client + server *webhook.Server + servingOpts envtest.WebhookInstallOptions + ) + + BeforeEach(func() { + ctx, ctxCancel = context.WithCancel(context.Background()) + // closed in individual tests differently + + servingOpts = envtest.WebhookInstallOptions{} + Expect(servingOpts.PrepWithoutInstalling()).To(Succeed()) + + testHostPort = net.JoinHostPort(servingOpts.LocalServingHost, fmt.Sprintf("%d", servingOpts.LocalServingPort)) + + // bypass needing to set up the x509 cert pool, etc ourselves + clientTransport, err := rest.TransportFor(&rest.Config{ + TLSClientConfig: rest.TLSClientConfig{CAData: servingOpts.LocalServingCAData}, + }) + Expect(err).NotTo(HaveOccurred()) + client = &http.Client{ + Transport: clientTransport, + } + + server = &webhook.Server{ + Host: servingOpts.LocalServingHost, + Port: servingOpts.LocalServingPort, + CertDir: servingOpts.LocalServingCertDir, + } + }) + AfterEach(func() { + Expect(servingOpts.Cleanup()).To(Succeed()) + }) + + genericStartServer := func(f func(ctx context.Context)) (done <-chan struct{}) { + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + f(ctx) + }() + // wait till we can ping the server to start the test + Eventually(func() error { + _, err := client.Get(fmt.Sprintf("https://%s/unservedpath", testHostPort)) + return err + }).Should(Succeed()) + + // this is normally called before Start by the manager + Expect(server.InjectFunc(func(i interface{}) error { + boolInj, canInj := i.(interface{ InjectBool(bool) error }) + if !canInj { + return nil + } + return boolInj.InjectBool(true) + })).To(Succeed()) + + return doneCh + } + + startServer := func() (done <-chan struct{}) { + return genericStartServer(func(ctx context.Context) { + Expect(server.Start(ctx)).To(Succeed()) + }) + } + + // TODO(directxman12): figure out a good way to test all the serving setup + // with httptest.Server to get all the niceness from that. 
+ + Context("when serving", func() { + PIt("should verify the client CA name when asked to", func() { + + }) + PIt("should support HTTP/2", func() { + + }) + + // TODO(directxman12): figure out a good way to test the port default, etc + }) + + It("should panic if a duplicate path is registered", func() { + server.Register("/somepath", &testHandler{}) + doneCh := startServer() + + Expect(func() { server.Register("/somepath", &testHandler{}) }).To(Panic()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + Context("when registering new webhooks before starting", func() { + It("should serve a webhook on the requested path", func() { + server.Register("/somepath", &testHandler{}) + + Expect(server.StartedChecker()(nil)).ToNot(Succeed()) + + doneCh := startServer() + + Eventually(func() ([]byte, error) { + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + return io.ReadAll(resp.Body) + }).Should(Equal([]byte("gadzooks!"))) + + Expect(server.StartedChecker()(nil)).To(Succeed()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should inject dependencies eventually, given an inject func is eventually provided", func() { + handler := &testHandler{} + server.Register("/somepath", handler) + doneCh := startServer() + + Eventually(func() bool { return handler.injectedField }).Should(BeTrue()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + }) + + Context("when registering webhooks after starting", func() { + var ( + doneCh <-chan struct{} + ) + BeforeEach(func() { + doneCh = startServer() + }) + AfterEach(func() { + // wait for cleanup to happen + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should serve a webhook on the requested path", func() { + server.Register("/somepath", &testHandler{}) + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + + Expect(io.ReadAll(resp.Body)).To(Equal([]byte("gadzooks!"))) + }) + + It("should inject dependencies, if an inject func has been provided already", func() { + handler := &testHandler{} + server.Register("/somepath", handler) + Expect(handler.injectedField).To(BeTrue()) + }) + }) + + It("should be able to serve in unmanaged mode", func() { + server = &webhook.Server{ + Host: servingOpts.LocalServingHost, + Port: servingOpts.LocalServingPort, + CertDir: servingOpts.LocalServingCertDir, + } + server.Register("/somepath", &testHandler{}) + doneCh := genericStartServer(func(ctx context.Context) { + Expect(server.StartStandalone(ctx, scheme.Scheme)) + }) + + Eventually(func() ([]byte, error) { + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + return io.ReadAll(resp.Body) + }).Should(Equal([]byte("gadzooks!"))) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should respect passed in TLS configurations", func() { + var finalCfg *tls.Config + tlsCfgFunc := func(cfg *tls.Config) { + cfg.CipherSuites = []uint16{ + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + } + // save cfg after changes to test against + finalCfg = cfg + } + server = &webhook.Server{ + Host: servingOpts.LocalServingHost, + Port: servingOpts.LocalServingPort, + CertDir: servingOpts.LocalServingCertDir, + TLSMinVersion: "1.2", + TLSOpts: []func(*tls.Config){ + tlsCfgFunc, + }, + } + server.Register("/somepath", 
&testHandler{}) + doneCh := genericStartServer(func(ctx context.Context) { + Expect(server.StartStandalone(ctx, scheme.Scheme)) + }) + + Eventually(func() ([]byte, error) { + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + return io.ReadAll(resp.Body) + }).Should(Equal([]byte("gadzooks!"))) + Expect(finalCfg.MinVersion).To(Equal(uint16(tls.VersionTLS12))) + Expect(finalCfg.CipherSuites).To(ContainElements( + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + )) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) +}) + +type testHandler struct { + injectedField bool +} + +func (t *testHandler) InjectBool(val bool) error { + t.injectedField = val + return nil +} +func (t *testHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if _, err := resp.Write([]byte("gadzooks!")); err != nil { + panic("unable to write http response!") + } +} diff --git a/pkg/webhook/webhook_integration_test.go b/pkg/webhook/webhook_integration_test.go new file mode 100644 index 0000000000..029a503b4b --- /dev/null +++ b/pkg/webhook/webhook_integration_test.go @@ -0,0 +1,226 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "errors" + "net" + "net/http" + "path/filepath" + "strconv" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission/admissiontest" +) + +var _ = Describe("Webhook", func() { + var c client.Client + var obj *appsv1.Deployment + BeforeEach(func() { + Expect(cfg).NotTo(BeNil()) + var err error + c, err = client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj = &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + }) + Context("when running a webhook server with a manager", func() { + It("should reject create request for webhook that rejects all requests", func() { + m, err := manager.New(cfg, manager.Options{ + Port: testenv.WebhookInstallOptions.LocalServingPort, + Host: testenv.WebhookInstallOptions.LocalServingHost, + CertDir: testenv.WebhookInstallOptions.LocalServingCertDir, + }) // we need manager here just to leverage manager.SetFields + Expect(err).NotTo(HaveOccurred()) + server := m.GetWebhookServer() + server.Register("/failing", &webhook.Admission{Handler: &rejectingValidator{}}) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + err = server.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + Eventually(func() bool { + err = c.Create(context.TODO(), obj) + return apierrors.ReasonForError(err) == metav1.StatusReason("Always denied") + }, 1*time.Second).Should(BeTrue()) + + cancel() + }) + It("should reject create request for multi-webhook that rejects all requests", func() { + m, err := manager.New(cfg, manager.Options{ + Port: testenv.WebhookInstallOptions.LocalServingPort, + Host: testenv.WebhookInstallOptions.LocalServingHost, + CertDir: testenv.WebhookInstallOptions.LocalServingCertDir, + }) // we need manager here just to leverage manager.SetFields + Expect(err).NotTo(HaveOccurred()) + server := m.GetWebhookServer() + server.Register("/failing", &webhook.Admission{Handler: admission.MultiValidatingHandler(&rejectingValidator{})}) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + err = server.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + Eventually(func() bool { + err = c.Create(context.TODO(), obj) + return apierrors.ReasonForError(err) == metav1.StatusReason("Always denied") + }, 1*time.Second).Should(BeTrue()) + + cancel() + }) + }) + Context("when running a webhook server without a manager", func() { + It("should reject create request for webhook that rejects all requests", func() { + server := webhook.Server{ + Port: testenv.WebhookInstallOptions.LocalServingPort, + Host: 
testenv.WebhookInstallOptions.LocalServingHost, + CertDir: testenv.WebhookInstallOptions.LocalServingCertDir, + } + server.Register("/failing", &webhook.Admission{Handler: &rejectingValidator{}}) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + err := server.StartStandalone(ctx, scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + }() + + Eventually(func() bool { + err := c.Create(context.TODO(), obj) + return apierrors.ReasonForError(err) == metav1.StatusReason("Always denied") + }, 1*time.Second).Should(BeTrue()) + + cancel() + }) + }) + Context("when running a standalone webhook", func() { + It("should reject create request for webhook that rejects all requests", func() { + ctx, cancel := context.WithCancel(context.Background()) + + By("generating the TLS config") + certPath := filepath.Join(testenv.WebhookInstallOptions.LocalServingCertDir, "tls.crt") + keyPath := filepath.Join(testenv.WebhookInstallOptions.LocalServingCertDir, "tls.key") + + certWatcher, err := certwatcher.New(certPath, keyPath) + Expect(err).NotTo(HaveOccurred()) + go func() { + Expect(certWatcher.Start(ctx)).NotTo(HaveOccurred()) + }() + + cfg := &tls.Config{ + NextProtos: []string{"h2"}, + GetCertificate: certWatcher.GetCertificate, + MinVersion: tls.VersionTLS12, + } + + By("generating the listener") + listener, err := tls.Listen("tcp", + net.JoinHostPort(testenv.WebhookInstallOptions.LocalServingHost, + strconv.Itoa(testenv.WebhookInstallOptions.LocalServingPort)), cfg) + Expect(err).NotTo(HaveOccurred()) + + By("creating and registering the standalone webhook") + hook, err := admission.StandaloneWebhook(admission.ValidatingWebhookFor( + &admissiontest.FakeValidator{ + ErrorToReturn: errors.New("Always denied"), + GVKToReturn: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, + }), admission.StandaloneOptions{}) + Expect(err).NotTo(HaveOccurred()) + http.Handle("/failing", hook) + + By("running the http server") + srv := httpserver.New(nil) + go func() { + idleConnsClosed := make(chan struct{}) + go func() { + <-ctx.Done() + Expect(srv.Shutdown(context.Background())).NotTo(HaveOccurred()) + close(idleConnsClosed) + }() + _ = srv.Serve(listener) + <-idleConnsClosed + }() + + Eventually(func() bool { + err = c.Create(context.TODO(), obj) + return apierrors.ReasonForError(err) == metav1.StatusReason("Always denied") + }, 1*time.Second).Should(BeTrue()) + + cancel() + }) + }) +}) + +type rejectingValidator struct { + d *admission.Decoder +} + +func (v *rejectingValidator) InjectDecoder(d *admission.Decoder) error { + v.d = d + return nil +} + +func (v *rejectingValidator) Handle(ctx context.Context, req admission.Request) admission.Response { + var obj appsv1.Deployment + if err := v.d.Decode(req, &obj); err != nil { + return admission.Denied(err.Error()) + } + return admission.Denied("Always denied") +} diff --git a/pkg/webhook/webhook_suite_test.go b/pkg/webhook/webhook_suite_test.go new file mode 100644 index 0000000000..b8ee879d36 --- /dev/null +++ b/pkg/webhook/webhook_suite_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook_test + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + admissionv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + suiteName := "Webhook Integration Suite" + RunSpecsWithDefaultAndCustomReporters(t, suiteName, []Reporter{printer.NewlineReporter{}, printer.NewProwReporter(suiteName)}) +} + +var testenv *envtest.Environment +var cfg *rest.Config + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + // we're initializing webhook here and not in webhook.go to also test the envtest install code via WebhookOptions + initializeWebhookInEnvironment() + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + fmt.Println("stopping?") + Expect(testenv.Stop()).To(Succeed()) +}, 60) + +func initializeWebhookInEnvironment() { + namespacedScopeV1 := admissionv1.NamespacedScope + failedTypeV1 := admissionv1.Fail + equivalentTypeV1 := admissionv1.Equivalent + noSideEffectsV1 := admissionv1.SideEffectClassNone + webhookPathV1 := "/failing" + + testenv.WebhookInstallOptions = envtest.WebhookInstallOptions{ + ValidatingWebhooks: []*admissionv1.ValidatingWebhookConfiguration{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-validation-webhook-config", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: "admissionregistration.k8s.io/v1", + }, + Webhooks: []admissionv1.ValidatingWebhook{ + { + Name: "deployment-validation.kubebuilder.io", + Rules: []admissionv1.RuleWithOperations{ + { + Operations: []admissionv1.OperationType{"CREATE", "UPDATE"}, + Rule: admissionv1.Rule{ + APIGroups: []string{"apps"}, + APIVersions: []string{"v1"}, + Resources: []string{"deployments"}, + Scope: &namespacedScopeV1, + }, + }, + }, + FailurePolicy: &failedTypeV1, + MatchPolicy: &equivalentTypeV1, + SideEffects: &noSideEffectsV1, + ClientConfig: admissionv1.WebhookClientConfig{ + Service: &admissionv1.ServiceReference{ + Name: "deployment-validation-service", + Namespace: "default", + Path: &webhookPathV1, + }, + }, + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, + }, + } +} diff --git a/tools/setup-envtest/README.md b/tools/setup-envtest/README.md new file mode 100644 index 0000000000..0a497c3ec4 --- /dev/null +++ b/tools/setup-envtest/README.md @@ -0,0 +1,125 @@ +# Envtest Binaries Manager + +This is a small tool that manages binaries for envtest. It can be used to +download new binaries, list currently installed and available ones, and +clean up versions. 
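+
+At a glance, the tool's job is to hand you a directory of binaries that
+envtest picks up via the `KUBEBUILDER_ASSETS` environment variable. One
+common pattern (illustrative; detailed examples follow below) is:
+
+```shell
+KUBEBUILDER_ASSETS="$(setup-envtest use -p path)" go test ./...
+```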
+
+To use it, just go-install it on Go 1.16+ (it's a separate, self-contained
+module):
+
+```shell
+go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+```
+
+For full documentation, run it with the `--help` flag, but here are some
+examples:
+
+```shell
+# download the latest envtest, and print out info about it
+setup-envtest use
+
+# download the latest 1.19 envtest, and print out the path
+setup-envtest use -p path 1.19.x!
+
+# switch to the most recent 1.21 envtest on disk
+source <(setup-envtest use -i -p env 1.21.x)
+
+# list all available local versions for darwin/amd64
+setup-envtest list -i --os darwin --arch amd64
+
+# remove all versions older than 1.16 from disk
+setup-envtest cleanup <1.16
+
+# use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal
+# logic for 'use'
+setup-envtest --use-env
+
+# use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest
+# installed version
+setup-envtest use -i --use-env
+
+# sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store
+setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz
+```
+
+## Where does it put all those binaries?
+
+By default, binaries are stored in a subdirectory of an OS-specific data
+directory, as per the OS's conventions.
+
+On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData%`; and on
+OSX, `~/Library/Application Support`.
+
+There's an overall folder that holds all files, and inside that is
+a folder for each version/platform pair. The exact directory structure is
+not guaranteed, except that the leaf directory will contain the names
+expected by envtest. You should always use `setup-envtest fetch` or
+`setup-envtest switch` (generally with the `-p path` or `-p env` flags) to
+get the directory that you should use.
+
+## Why do I have to do that `source <(blah blah blah)` thing?
+
+This is a normal binary, not a shell script, so we can't set the parent
+process's environment variables. If you use this by hand a lot and want
+to save the typing, you could put something like the following in your
+`~/.zshrc` (or similar for bash/fish/whatever, adapted to that shell):
+
+```shell
+setup-envtest() {
+  if (($@[(Ie)use])); then
+    source <($GOPATH/bin/setup-envtest "$@" -p env)
+  else
+    $GOPATH/bin/setup-envtest "$@"
+  fi
+}
+```
+
+## What if I don't want to talk to the internet?
+
+There are a few options.
+
+First, you'll probably want to set the `-i/--installed` flag. If you want
+to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY`
+env variable, which will switch that flag on by default.
+
+Then, you have a few options for managing your binaries:
+
+- If you don't *really* want to manage binaries with this tool, or you want
+  to respect the $KUBEBUILDER_ASSETS variable if it's set to something
+  outside the store, use the `use --use-env -i` command.
+
+  `--use-env` makes the command unconditionally use the value of
+  KUBEBUILDER_ASSETS as long as it contains the required binaries, and
+  `-i` indicates that we only ever want to work with installed binaries
+  (no reaching out to the remote GCS storage).
+
+  As noted above, you can use `ENVTEST_INSTALLED_ONLY=true` to switch `-i`
+  on by default, and you can use `ENVTEST_USE_ENV=true` to switch
+  `--use-env` on by default.
+
+- If you want to use this tool, but download your gzipped tarballs
+  separately, you can use the `sideload` command. You'll need to use the
+  `-k/--version` flag to indicate which version you're sideloading.
+ + After that, it'll be as if you'd installed the binaries with `use`. + +- If you want to talk to some internal source, you can use the + `--remote-bucket` and `--remote-server` options. The former sets which + GCS bucket to download from, and the latter sets the host to talk to as + if it were a GCS endpoint. Theoretically, you could use the latter + version to run an internal "mirror" -- the tool expects + + - `HOST/storage/v1/b/BUCKET/o` to return JSON like + + ```json + {"items": [ + {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, + {"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}, + ]} + ``` + + - `HOST/storage/v1/b/BUCKET/o/TARBALL_NAME` to return JSON like + `{"name": "kubebuilder-tools-X.Y.Z-os-arch.tar.gz", "md5Hash": ""}` + + - `HOST/storage/v1/b/BUCKET/o/TARBALL_NAME?alt=media` to return the + actual file contents diff --git a/tools/setup-envtest/env/env.go b/tools/setup-envtest/env/env.go new file mode 100644 index 0000000000..e12a107352 --- /dev/null +++ b/tools/setup-envtest/env/env.go @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "sort" + "strings" + "text/tabwriter" + + "github.com/go-logr/logr" + "github.com/spf13/afero" // too bad fs.FS isn't writable :-/ + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// Env represents an environment for downloading and otherwise manipulating +// envtest binaries. +// +// In general, the methods will use the Exit{,Cause} functions from this package +// to indicate errors. Catch them with a `defer HandleExitWithCode()`. +type Env struct { + // the following *must* be set on input + + // Platform is our current platform + Platform versions.PlatformItem + + // VerifiySum indicates whether or not we should run checksums. + VerifySum bool + // NoDownload forces us to not contact GCS, looking only + // at local files instead. + NoDownload bool + // ForceDownload forces us to ignore local files and always + // contact GCS & re-download. + ForceDownload bool + + // Client is our remote client for contacting GCS. + Client *remote.Client + + // Log allows us to log. + Log logr.Logger + + // the following *may* be set on input, or may be discovered + + // Version is the version(s) that we want to download + // (may be automatically retrieved later on). + Version versions.Spec + + // Store is used to load/store entries to/from disk. + Store *store.Store + + // FS is the file system to read from/write to for provisioning temp files + // for storing the archives temporarily. + FS afero.Afero + + // Out is the place to write output text to + Out io.Writer + + // manualPath is the manually discovered path from PathMatches, if + // a non-store path was used. It'll be printed by PrintInfo if present. + manualPath string +} + +// CheckCoherence checks that this environment has filled-out, coherent settings +// (e.g. NoDownload & ForceDownload aren't both set). 
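+//
+// A sketch of the intended call order (illustrative only; the real wiring
+// lives in main.go and the workflows package):
+//
+//	defer HandleExitWithCode()
+//	env.CheckCoherence()
+//	env.EnsureBaseDirs(ctx)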
+func (e *Env) CheckCoherence() { + if e.NoDownload && e.ForceDownload { + Exit(2, "cannot both skip downloading *and* force re-downloading") + } + + if e.Platform.OS == "" || e.Platform.Arch == "" { + Exit(2, "must specify non-empty OS and arch (did you specify bad --os or --arch values?)") + } +} + +func (e *Env) filter() store.Filter { + return store.Filter{Version: e.Version, Platform: e.Platform.Platform} +} + +func (e *Env) item() store.Item { + concreteVer := e.Version.AsConcrete() + if concreteVer == nil || e.Platform.IsWildcard() { + panic("no platform/version set") // unexpected, print stack trace + } + return store.Item{Version: *concreteVer, Platform: e.Platform.Platform} +} + +// ListVersions prints out all available versions matching this Env's +// platform & version selector (respecting NoDownload to figure +// out whether or not to match remote versions). +func (e *Env) ListVersions(ctx context.Context) { + out := tabwriter.NewWriter(e.Out, 4, 4, 2, ' ', 0) + defer out.Flush() + localVersions, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to list installed versions") + } + for _, item := range localVersions { + // already filtered by onDiskVersions + fmt.Fprintf(out, "(installed)\tv%s\t%s\n", item.Version, item.Platform) + } + + if e.NoDownload { + return + } + + remoteVersions, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable list to available versions") + } + + for _, set := range remoteVersions { + if !e.Version.Matches(set.Version) { + continue + } + sort.Slice(set.Platforms, func(i, j int) bool { + return orderPlatforms(set.Platforms[i].Platform, set.Platforms[j].Platform) + }) + for _, plat := range set.Platforms { + if e.Platform.Matches(plat.Platform) { + fmt.Fprintf(out, "(available)\tv%s\t%s\n", set.Version, plat) + } + } + } +} + +// LatestVersion returns the latest version matching our version selector and +// platform from the remote server, with the correspoding checksum for later +// use as well. +func (e *Env) LatestVersion(ctx context.Context) (versions.Concrete, versions.PlatformItem) { + vers, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable to list versions to find latest one") + } + for _, set := range vers { + if !e.Version.Matches(set.Version) { + e.Log.V(1).Info("skipping non-matching version", "version", set.Version) + continue + } + // double-check that our platform is supported + for _, plat := range set.Platforms { + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + if e.Platform.Matches(plat.Platform) && e.Version.Matches(set.Version) { + return set.Version, plat + } + } + e.Log.Info("latest version not supported for your platform, checking older ones", "version", set.Version, "platform", e.Platform) + } + + Exit(2, "unable to find a version that was supported for platform %s", e.Platform) + return versions.Concrete{}, versions.PlatformItem{} // unreachable, but Go's type system can't express the "never" type +} + +// ExistsAndValid checks if our current (concrete) version & platform +// exist on disk (unless ForceDownload is set, in which cause it always +// returns false). +// +// Must be called after EnsureVersionIsSet so that we have a concrete +// Version selected. Must have a concrete platform, or ForceDownload +// must be set. 
+func (e *Env) ExistsAndValid() bool { + if e.ForceDownload { + // we always want to download, so don't check here + return false + } + + if e.Platform.IsWildcard() { + Exit(2, "you must have a concrete platform with this command -- you cannot use wildcard platforms with fetch or switch") + } + + exists, err := e.Store.Has(e.item()) + if err != nil { + ExitCause(2, err, "unable to check if existing version exists") + } + + if exists { + e.Log.Info("applicable version found on disk", "version", e.Version) + } + return exists +} + +// EnsureVersionIsSet ensures that we have a non-wildcard version +// configured. +// +// If necessary, it will enumerate on-disk and remote versions to accomplish +// this, finding a version that matches our version selector and platform. +// It will always yield a concrete version, it *may* yield a concrete platorm +// as well. +func (e *Env) EnsureVersionIsSet(ctx context.Context) { + if e.Version.AsConcrete() != nil { + return + } + var localVer *versions.Concrete + var localPlat versions.Platform + + items, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to determine installed versions") + } + + for _, item := range items { + if !e.Version.Matches(item.Version) || !e.Platform.Matches(item.Platform) { + e.Log.V(1).Info("skipping version, doesn't match", "version", item.Version, "platform", item.Platform) + continue + } + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + ver := item.Version // copy to avoid referencing iteration variable + localVer = &ver + localPlat = item.Platform + break + } + + if e.NoDownload || !e.Version.CheckLatest { + // no version specified, but we either + // + // a) shouldn't contact remote + // b) don't care to find the absolute latest + // + // so just find the latest local version + if localVer != nil { + e.Version.MakeConcrete(*localVer) + e.Platform.Platform = localPlat + return + } + if e.NoDownload { + Exit(2, "no applicable on-disk versions for %s found, you'll have to download one, or run list -i to see what you do have", e.Platform) + } + // if we didn't ask for the latest version, but don't have anything + // available, try the internet ;-) + } + + // no version specified and we need the latest in some capacity, so find latest from remote + // so find the latest local first, then compare it to the latest remote, and use whichever + // of the two is more recent. + e.Log.Info("no version specified, finding latest") + serverVer, platform := e.LatestVersion(ctx) + + // if we're not forcing a download, and we have a newer local version, just use that + if !e.ForceDownload && localVer != nil && localVer.NewerThan(serverVer) { + e.Platform.Platform = localPlat // update our data with md5 + e.Version.MakeConcrete(*localVer) + return + } + + // otherwise, use the new version from the server + e.Platform = platform // update our data with md5 + e.Version.MakeConcrete(serverVer) +} + +// Fetch ensures that the requested platform and version are on disk. +// You must call EnsureVersionIsSet before calling this method. +// +// If ForceDownload is set, we always download, otherwise we only download +// if we're missing the version on disk. 
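+//
+// A rough sketch of how a fetch/use-style caller combines these methods
+// (illustrative only; see the workflows package for the real sequence):
+//
+//	env.EnsureVersionIsSet(ctx)
+//	if !env.ExistsAndValid() {
+//		env.Fetch(ctx)
+//	}
+//	env.PrintInfo(PrintPath)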
+func (e *Env) Fetch(ctx context.Context) { + log := e.Log.WithName("fetch") + + // if we didn't just fetch it, grab the sum to verify + if e.VerifySum && e.Platform.MD5 == "" { + if err := e.Client.FetchSum(ctx, *e.Version.AsConcrete(), &e.Platform); err != nil { + ExitCause(2, err, "unable to fetch checksum for requested version") + } + } + if !e.VerifySum { + e.Platform.MD5 = "" // skip verification + } + + var packedPath string + + // cleanup on error (needs to be here so it will happen after the other defers) + defer e.cleanupOnError(func() { + if packedPath != "" { + e.Log.V(1).Info("cleaning up downloaded archive", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + e.Log.Error(err, "unable to clean up archive path", "path", packedPath) + } + } + }) + + archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(*e.Version.AsConcrete())) + if err != nil { + ExitCause(2, err, "unable to open file to write downloaded archive to") + } + defer archiveOut.Close() + packedPath = archiveOut.Name() + log.V(1).Info("writing downloaded archive", "path", packedPath) + + if err := e.Client.GetVersion(ctx, *e.Version.AsConcrete(), e.Platform, archiveOut); err != nil { + ExitCause(2, err, "unable to download requested version") + } + log.V(1).Info("downloaded archive", "path", packedPath) + + if err := archiveOut.Sync(); err != nil { // sync before reading back + ExitCause(2, err, "unable to flush downloaded archive file") + } + if _, err := archiveOut.Seek(0, 0); err != nil { + ExitCause(2, err, "unable to jump back to beginning of archive file to unzip") + } + + if err := e.Store.Add(ctx, e.item(), archiveOut); err != nil { + ExitCause(2, err, "unable to store version to disk") + } + + log.V(1).Info("removing archive from disk", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to remove downloaded archive", "path", packedPath) + } +} + +// cleanup on error cleans up if we hit an exitCode error. +// +// Use it in a defer. +func (e *Env) cleanupOnError(extraCleanup func()) { + cause := recover() + if cause == nil { + return + } + // don't panic in a panic handler + var exit *exitCode + if asExit(cause, &exit) && exit.code != 0 { + e.Log.Info("cleaning up due to error") + // we already log in the function, and don't want to panic, so + // ignore the error + extraCleanup() + } + panic(cause) // re-start the panic now that we're done +} + +// Remove removes the data for our version selector & platform from disk. +func (e *Env) Remove(ctx context.Context) { + items, err := e.Store.Remove(ctx, e.filter()) + for _, item := range items { + fmt.Fprintf(e.Out, "removed %s\n", item) + } + if err != nil { + ExitCause(2, err, "unable to remove all requested version(s)") + } +} + +// PrintInfo prints out information about a single, current version +// and platform, according to the given formatting info. 
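+//
+// Roughly, the three formats produce output like the following (the store
+// path below is made up):
+//
+//	PrintOverview:  Version: 1.21.3
+//	                OS/Arch: linux/amd64
+//	                Path: /data/dir/k8s/1.21.3-linux-amd64
+//	PrintPath:      /data/dir/k8s/1.21.3-linux-amd64
+//	PrintEnv:       export KUBEBUILDER_ASSETS='/data/dir/k8s/1.21.3-linux-amd64'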
+func (e *Env) PrintInfo(printFmt PrintFormat) { + // use the manual path if it's set, otherwise use the standard path + path := e.manualPath + if e.manualPath == "" { + item := e.item() + var err error + path, err = e.Store.Path(item) + if err != nil { + ExitCause(2, err, "unable to get path for version %s", item) + } + } + switch printFmt { + case PrintOverview: + fmt.Fprintf(e.Out, "Version: %s\n", e.Version) + fmt.Fprintf(e.Out, "OS/Arch: %s\n", e.Platform) + if e.Platform.MD5 != "" { + fmt.Fprintf(e.Out, "md5: %s\n", e.Platform.MD5) + } + fmt.Fprintf(e.Out, "Path: %s\n", path) + case PrintPath: + fmt.Fprint(e.Out, path) // NB(directxman12): no newline -- want the bare path here + case PrintEnv: + // quote in case there are spaces, etc in the path + // the weird string below works like this: + // - you can't escape quotes in shell + // - shell strings that are next to each other are concatenated (so "a""b""c" == "abc") + // - you can intermix quote styles using the above + // - so `'"'"'` --> CLOSE_QUOTE + "'" + OPEN_QUOTE + shellQuoted := strings.ReplaceAll(path, "'", `'"'"'`) + fmt.Fprintf(e.Out, "export KUBEBUILDER_ASSETS='%s'\n", shellQuoted) + default: + panic(fmt.Sprintf("unexpected print format %v", printFmt)) + } +} + +// EnsureBaseDirs ensures that the base packed and unpacked directories +// exist. +// +// This should be the first thing called after CheckCoherence. +func (e *Env) EnsureBaseDirs(ctx context.Context) { + if err := e.Store.Initialize(ctx); err != nil { + ExitCause(2, err, "unable to make sure store is initialized") + } +} + +// Sideload takes an input stream, and loads it as if it had been a downloaded .tar.gz file +// for the current *concrete* version and platform. +func (e *Env) Sideload(ctx context.Context, input io.Reader) { + log := e.Log.WithName("sideload") + if e.Version.AsConcrete() == nil || e.Platform.IsWildcard() { + Exit(2, "must specify a concrete version and platform to sideload. Make sure you've passed a version, like 'sideload 1.21.0'") + } + log.V(1).Info("sideloading from input stream to version", "version", e.Version, "platform", e.Platform) + if err := e.Store.Add(ctx, e.item(), input); err != nil { + ExitCause(2, err, "unable to sideload item to disk") + } +} + +var ( + // expectedExectuables are the executables that are checked in PathMatches + // for non-store paths. + expectedExecutables = []string{ + "kube-apiserver", + "etcd", + "kubectl", + } +) + +// PathMatches checks if the path (e.g. from the environment variable) +// matches this version & platform selector, and if so, returns true. 
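+//
+// Two hypothetical values that would match:
+//
+//	/some/dir/k8s/1.21.3-linux-amd64  (its base name parses as a version/platform)
+//	/usr/local/kubebuilder/bin        (contains kube-apiserver, etcd, and kubectl)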
+func (e *Env) PathMatches(value string) bool { + e.Log.V(1).Info("checking if (env var) path represents our desired version", "path", value) + if value == "" { + // if we're unset, + return false + } + + if e.versionFromPathName(value) { + e.Log.V(1).Info("path appears to be in our store, using that info", "path", value) + return true + } + + e.Log.V(1).Info("path is not in our store, checking for binaries", "path", value) + for _, expected := range expectedExecutables { + _, err := e.FS.Stat(filepath.Join(value, expected)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // one of our required binaries is missing, return false + e.Log.V(1).Info("missing required binary in (env var) path", "binary", expected, "path", value) + return false + } + ExitCause(2, err, "unable to check for existence of binary %s from existing (env var) path %s", value, expected) + } + } + + // success, all binaries present + e.Log.V(1).Info("all required binaries present in (env var) path, using that", "path", value) + + // don't bother checking the version, the user explicitly asked us to use this + // we don't know the version, so set it to wildcard + e.Version = versions.AnyVersion + e.Platform.OS = "*" + e.Platform.Arch = "*" + e.manualPath = value + return true +} + +// versionFromPathName checks if the given path's last component looks like one +// of our versions, and, if so, what version it represents. If succesfull, +// it'll set version and platform, and return true. Otherwise it returns +// false. +func (e *Env) versionFromPathName(value string) bool { + baseName := filepath.Base(value) + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, baseName) + if ver == nil { + // not a version that we can tell + return false + } + + // yay we got a version! + e.Version.MakeConcrete(*ver) + e.Platform.Platform = pl + e.manualPath = value // might be outside our store, set this just in case + + return true +} diff --git a/tools/setup-envtest/env/env_suite_test.go b/tools/setup-envtest/env/env_suite_test.go new file mode 100644 index 0000000000..7d9fe9c179 --- /dev/null +++ b/tools/setup-envtest/env/env_suite_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func TestEnv(t *testing.T) { + testLog = zapLogger() + + RegisterFailHandler(Fail) + RunSpecs(t, "Env Suite") +} diff --git a/tools/setup-envtest/env/env_test.go b/tools/setup-envtest/env/env_test.go new file mode 100644 index 0000000000..874ac7a736 --- /dev/null +++ b/tools/setup-envtest/env/env_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env_test + +import ( + "bytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/spf13/afero" + + . "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Env", func() { + // Most of the rest of this is tested e2e via the workflows test, + // but there's a few things that are easier to test here. Eventually + // we should maybe move some of the tests here. 
+ var ( + env *Env + outBuffer *bytes.Buffer + ) + BeforeEach(func() { + outBuffer = new(bytes.Buffer) + env = &Env{ + Out: outBuffer, + Log: testLog, + + Store: &store.Store{ + // use spaces and quotes to test our quote escaping below + Root: afero.NewBasePathFs(afero.NewMemMapFs(), "/kb's test store"), + }, + + // shouldn't use these, but just in case + NoDownload: true, + FS: afero.Afero{Fs: afero.NewMemMapFs()}, + } + + env.Version.MakeConcrete(versions.Concrete{ + Major: 1, Minor: 21, Patch: 3, + }) + env.Platform.Platform = versions.Platform{ + OS: "linux", Arch: "amd64", + } + }) + + Describe("printing", func() { + It("should use a manual path if one is present", func() { + By("using a manual path") + Expect(env.PathMatches("/otherstore/1.21.4-linux-amd64")).To(BeTrue()) + + By("checking that that path is printed properly") + env.PrintInfo(PrintPath) + Expect(outBuffer.String()).To(Equal("/otherstore/1.21.4-linux-amd64")) + }) + + Context("as human-readable info", func() { + BeforeEach(func() { + env.PrintInfo(PrintOverview) + }) + + It("should contain the version", func() { + Expect(outBuffer.String()).To(ContainSubstring("/kb's test store/k8s/1.21.3-linux-amd64")) + }) + It("should contain the path", func() { + Expect(outBuffer.String()).To(ContainSubstring("1.21.3")) + }) + It("should contain the platform", func() { + Expect(outBuffer.String()).To(ContainSubstring("linux/amd64")) + }) + + }) + Context("as just a path", func() { + It("should print out just the path", func() { + env.PrintInfo(PrintPath) + Expect(outBuffer.String()).To(Equal(`/kb's test store/k8s/1.21.3-linux-amd64`)) + }) + }) + + Context("as env vars", func() { + BeforeEach(func() { + env.PrintInfo(PrintEnv) + }) + It("should set KUBEBUILDER_ASSETS", func() { + Expect(outBuffer.String()).To(HavePrefix("export KUBEBUILDER_ASSETS=")) + }) + It("should quote the return path, escaping quotes to deal with spaces, etc", func() { + Expect(outBuffer.String()).To(HaveSuffix(`='/kb'"'"'s test store/k8s/1.21.3-linux-amd64'` + "\n")) + }) + }) + }) +}) diff --git a/tools/setup-envtest/env/exit.go b/tools/setup-envtest/env/exit.go new file mode 100644 index 0000000000..ae393b593b --- /dev/null +++ b/tools/setup-envtest/env/exit.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "errors" + "fmt" + "os" +) + +// Exit exits with the given code and error message. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func Exit(code int, msg string, args ...interface{}) { + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg, args...), + }) +} + +// ExitCause exits with the given code and error message, automatically +// wrapping the underlying error passed as well. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func ExitCause(code int, err error, msg string, args ...interface{}) { + args = append(args, err) + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg+": %w", args...), + }) +} + +// exitCode is an error that indicates, on a panic, to exit with the given code +// and message. +type exitCode struct { + code int + err error +} + +func (c *exitCode) Error() string { + return fmt.Sprintf("%v (exit code %d)", c.err, c.code) +} +func (c *exitCode) Unwrap() error { + return c.err +} + +// asExit checks if the given (panic) value is an exitCode error, +// and if so stores it in the given pointer. It's roughly analogous +// to errors.As, except it works on recover() values. 
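+//
+// Typical use, a condensed version of what HandleExitWithCode does below:
+//
+//	if cause := recover(); cause != nil {
+//		var exit *exitCode
+//		if asExit(cause, &exit) {
+//			os.Exit(exit.code)
+//		}
+//		panic(cause)
+//	}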
+func asExit(val interface{}, exit **exitCode) bool { + if val == nil { + return false + } + err, isErr := val.(error) + if !isErr { + return false + } + if !errors.As(err, exit) { + return false + } + return true +} + +// HandleExitWithCode handles panics of type exitCode, +// printing the status message and existing with the given +// exit code, or re-raising if not an exitCode error. +// +// This should be the first defer in your main function. +func HandleExitWithCode() { + if cause := recover(); CheckRecover(cause, func(code int, err error) { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(code) + }) { + panic(cause) + } +} + +// CheckRecover checks the value of cause, calling the given callback +// if it's an exitCode error. It returns true if we should re-panic +// the cause. +// +// It's mainly useful for testing, normally you'd use HandleExitWithCode. +func CheckRecover(cause interface{}, cb func(int, error)) bool { + if cause == nil { + return false + } + var exitErr *exitCode + if !asExit(cause, &exitErr) { + // re-raise if it's not an exit error + return true + } + + cb(exitErr.code, exitErr.err) + return false +} diff --git a/tools/setup-envtest/env/helpers.go b/tools/setup-envtest/env/helpers.go new file mode 100644 index 0000000000..2c98c88d95 --- /dev/null +++ b/tools/setup-envtest/env/helpers.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} + +// PrintFormat indicates how to print out fetch and switch results. +// It's a valid pflag.Value so it can be used as a flag directly. +type PrintFormat int + +const ( + // PrintOverview prints human-readable data, + // including path, version, arch, and checksum (when available). + PrintOverview PrintFormat = iota + // PrintPath prints *only* the path, with no decoration. + PrintPath + // PrintEnv prints the path with the corresponding env variable, so that + // you can source the output like + // `source $(fetch-envtest switch -p env 1.20.x)`. + PrintEnv +) + +func (f PrintFormat) String() string { + switch f { + case PrintOverview: + return "overview" + case PrintPath: + return "path" + case PrintEnv: + return "env" + default: + panic(fmt.Sprintf("unexpected print format %d", int(f))) + } +} + +// Set sets the value of this as a flag. +func (f *PrintFormat) Set(val string) error { + switch val { + case "overview": + *f = PrintOverview + case "path": + *f = PrintPath + case "env": + *f = PrintEnv + default: + return fmt.Errorf("unknown print format %q, use one of overview|path|env", val) + } + return nil +} + +// Type is the type of this value as a flag. 
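+//
+// Together with Set and String above, this lets a PrintFormat be passed
+// straight to pflag, along the lines of (illustrative; the actual flag is
+// registered in main.go):
+//
+//	flag.VarP(&printFormat, "print", "p", "what info to print after the command runs")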
+func (PrintFormat) Type() string { + return "{overview|path|env}" +} diff --git a/tools/setup-envtest/go.mod b/tools/setup-envtest/go.mod new file mode 100644 index 0000000000..0bc6208abc --- /dev/null +++ b/tools/setup-envtest/go.mod @@ -0,0 +1,27 @@ +module sigs.k8s.io/controller-runtime/tools/setup-envtest + +go 1.17 + +require ( + github.com/go-logr/logr v1.2.0 + github.com/go-logr/zapr v1.2.0 + github.com/onsi/ginkgo v1.16.5 + github.com/onsi/gomega v1.17.0 + github.com/spf13/afero v1.6.0 + github.com/spf13/pflag v1.0.5 + go.uber.org/zap v1.19.1 +) + +require ( + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/nxadm/tail v1.4.8 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect + golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect + golang.org/x/text v0.3.6 // indirect + google.golang.org/protobuf v1.26.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/tools/setup-envtest/go.sum b/tools/setup-envtest/go.sum new file mode 100644 index 0000000000..3e5a29f95e --- /dev/null +++ b/tools/setup-envtest/go.sum @@ -0,0 +1,149 @@ +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= 
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/setup-envtest/main.go b/tools/setup-envtest/main.go new file mode 100644 index 0000000000..517d12b9d2 --- /dev/null +++ b/tools/setup-envtest/main.go @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package main + +import ( + goflag "flag" + "fmt" + "os" + "runtime" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "github.com/spf13/afero" + flag "github.com/spf13/pflag" + "go.uber.org/zap" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +const ( + // envNoDownload is an env variable that can be set to always force + // the --installed-only, -i flag to be set. + envNoDownload = "ENVTEST_INSTALLED_ONLY" + // envUseEnv is an env variable that can be set to control the --use-env + // flag globally. + envUseEnv = "ENVTEST_USE_ENV" +) + +var ( + force = flag.Bool("force", false, "force re-downloading dependencies, even if they're already present and correct") + installedOnly = flag.BoolP("installed-only", "i", os.Getenv(envNoDownload) != "", + "only look at installed versions -- do not query the remote API server, "+ + "and error out if it would be necessary to") + verify = flag.Bool("verify", true, "verify dependencies while downloading") + useEnv = flag.Bool("use-env", os.Getenv(envUseEnv) != "", "whether to return the value of KUBEBUILDER_ASSETS if it's already set") + + targetOS = flag.String("os", runtime.GOOS, "os to download for (e.g. linux, darwin, for listing operations, use '*' to list all platforms)") + targetArch = flag.String("arch", runtime.GOARCH, "architecture to download for (e.g. amd64, for listing operations, use '*' to list all platforms)") + + // printFormat is the flag value for -p, --print. + printFormat = envp.PrintOverview + // zapLvl is the flag value for logging verbosity. 
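+ // It defaults to warn; main registers it below as the "-v" go-flag on the
+ // pflag command line, so passing -v adjusts this level directly.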
+ zapLvl = zap.WarnLevel + + binDir = flag.String("bin-dir", "", + "directory to store binary assets (default: $OS_SPECIFIC_DATA_DIR/envtest-binaries)") + remoteBucket = flag.String("remote-bucket", "kubebuilder-tools", "remote GCS bucket to download from") + remoteServer = flag.String("remote-server", "storage.googleapis.com", + "remote server to query from. You can override this if you want to run "+ + "an internal storage server instead, or for testing.") +) + +// TODO(directxman12): handle interrupts? + +// setupLogging configures a Zap logger. +func setupLogging() logr.Logger { + logCfg := zap.NewDevelopmentConfig() + logCfg.Level = zap.NewAtomicLevelAt(zapLvl) + zapLog, err := logCfg.Build() + if err != nil { + envp.ExitCause(1, err, "who logs the logger errors?") + } + return zapr.NewLogger(zapLog) +} + +// setupEnv initializes the environment from flags. +func setupEnv(globalLog logr.Logger, version string) *envp.Env { + log := globalLog.WithName("setup") + if *binDir == "" { + dataDir, err := store.DefaultStoreDir() + if err != nil { + envp.ExitCause(1, err, "unable to deterimine default binaries directory (use --bin-dir to manually override)") + } + + *binDir = dataDir + } + log.V(1).Info("using binaries directory", "dir", *binDir) + + env := &envp.Env{ + Log: globalLog, + Client: &remote.Client{ + Log: globalLog.WithName("storage-client"), + Bucket: *remoteBucket, + Server: *remoteServer, + }, + VerifySum: *verify, + ForceDownload: *force, + NoDownload: *installedOnly, + Platform: versions.PlatformItem{ + Platform: versions.Platform{ + OS: *targetOS, + Arch: *targetArch, + }, + }, + FS: afero.Afero{Fs: afero.NewOsFs()}, + Store: store.NewAt(*binDir), + Out: os.Stdout, + } + + switch version { + case "", "latest": + env.Version = versions.LatestVersion + case "latest-on-disk": + // we sort by version, latest first, so this'll give us the latest on + // disk (as per the contract from env.List & store.List) + env.Version = versions.AnyVersion + env.NoDownload = true + default: + var err error + env.Version, err = versions.FromExpr(version) + if err != nil { + envp.ExitCause(1, err, "version be a valid version, or simply 'latest' or 'latest-on-disk'") + } + } + + env.CheckCoherence() + + return env +} + +func main() { + // exit with appropriate error codes -- this should be the first defer so + // that it's the last one executed. + defer envp.HandleExitWithCode() + + // set up flags + flag.Usage = func() { + name := os.Args[0] + fmt.Fprintf(os.Stderr, "Usage: %s [FLAGS] use|list|cleanup|sideload [VERSION]\n", name) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + ` +Note: this command is currently alpha, and the usage/behavior may change from release to release. + +Examples: + + # download the latest envtest, and print out info about it + %[1]s use + + # download the latest 1.19 envtest, and print out the path + %[1]s use -p path 1.19.x! 
+
+ # switch to the most recent 1.21 envtest on disk
+ source <(%[1]s use -i -p env 1.21.x)
+
+ # list all available local versions for darwin/amd64
+ %[1]s list -i --os darwin --arch amd64
+
+ # remove all versions older than 1.16 from disk
+ %[1]s cleanup <1.16
+
+ # use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal
+ # logic for 'use'
+ %[1]s --use-env
+
+ # use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest
+ # installed version
+ %[1]s use -i --use-env
+
+ # sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store
+ %[1]s sideload 1.16.2 < downloaded-envtest.tar.gz
+
+Commands:
+
+ use:
+ get information for the requested version, downloading it if necessary and allowed.
+ Needs a concrete platform (no wildcards), but wildcard versions are supported.
+
+ list:
+ list installed *and* available versions matching the given version & platform.
+ May have wildcard versions *and* platforms.
+ If the -i flag is passed, only installed versions are listed.
+
+ cleanup:
+ remove all versions matching the given version & platform selector.
+ May have wildcard versions *and* platforms.
+
+ sideload:
+ reads a .tar.gz file from stdin and expands it into the store.
+ Must have a concrete version and platform.
+
+Versions:
+
+ Versions take the form of a small subset of semver selectors.
+
+ Basic semver whole versions are accepted: X.Y.Z.
+ Z may also be '*' or 'x' to match a wildcard.
+ You may also just write X.Y, which means X.Y.*.
+
+ A version may be prefixed with '~' to match the most recent Z release
+ in the given Y release ( [X.Y.Z, X.Y+1.0) ).
+
+ Finally, you may suffix the version with '!' to force checking the
+ remote API server for the latest version.
+
+ For example:
+
+ 1.16.x / 1.16.* / 1.16 # any 1.16 version
+ ~1.19.3 # any 1.19 version that's at least 1.19.3
+ <1.17 # any release below 1.17
+ 1.22.x! # the latest 1.22 release available remotely
+
+Output:
+
+ The fetch & switch commands respect the --print, -p flag.
+
+ overview: human-readable information
+ path: print out the path, by itself
+ env: print out the path in a form that can be sourced to use that version with envtest
+
+ Other commands have human-readable output formats only.
+
+Environment Variables:
+
+ KUBEBUILDER_ASSETS:
+ --use-env will check this, and '-p/--print env' will return this.
+ If --use-env is true and this is set, we won't check our store
+ for versions -- we'll just immediately return whatever's in
+ this env var.
+ + %[2]s: + will switch the default of -i/--installed to true if set to any value + + %[3]s: + will switch the default of --use-env to true if set to any value + +`, name, envNoDownload, envUseEnv) + } + flag.CommandLine.AddGoFlag(&goflag.Flag{Name: "v", Usage: "logging level", Value: &zapLvl}) + flag.VarP(&printFormat, "print", "p", "what info to print after fetch-style commands (overview, path, env)") + needHelp := flag.Bool("help", false, "print out this help text") // register help so that we don't get an error at the end + flag.Parse() + + if *needHelp { + flag.Usage() + envp.Exit(2, "") + } + + // check our argument count + if numArgs := flag.NArg(); numArgs < 1 || numArgs > 2 { + flag.Usage() + envp.Exit(2, "please specify a command to use, and optionally a version selector") + } + + // set up logging + globalLog := setupLogging() + + // set up the environment + var version string + if flag.NArg() > 1 { + version = flag.Arg(1) + } + env := setupEnv(globalLog, version) + + // perform our main set of actions + switch action := flag.Arg(0); action { + case "use": + workflows.Use{ + UseEnv: *useEnv, + PrintFormat: printFormat, + AssetsPath: os.Getenv("KUBEBUILDER_ASSETS"), + }.Do(env) + case "list": + workflows.List{}.Do(env) + case "cleanup": + workflows.Cleanup{}.Do(env) + case "sideload": + workflows.Sideload{ + Input: os.Stdin, + PrintFormat: printFormat, + }.Do(env) + default: + flag.Usage() + envp.Exit(2, "unknown action %q", action) + } +} diff --git a/tools/setup-envtest/remote/client.go b/tools/setup-envtest/remote/client.go new file mode 100644 index 0000000000..be82532583 --- /dev/null +++ b/tools/setup-envtest/remote/client.go @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "crypto/md5" //nolint:gosec + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. +type objectList struct { + Items []bucketObject `json:"items"` + NextPageToken string `json:"nextPageToken"` +} + +// bucketObject is the parts we need of the GCS object metadata. +type bucketObject struct { + Name string `json:"name"` + Hash string `json:"md5Hash"` +} + +// Client is a basic client for fetching versions of the envtest binary archives +// from GCS. +type Client struct { + // Bucket is the bucket to fetch from. + Bucket string + + // Server is the GCS-like storage server + Server string + + // Log allows us to log. + Log logr.Logger + + // Insecure uses http for testing + Insecure bool +} + +func (c *Client) scheme() string { + if c.Insecure { + return "http" + } + return "https" +} + +// ListVersions lists all available tools versions in the given bucket, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. 
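+//
+// A minimal usage sketch (client and ctx stand in for a configured *Client
+// and a context.Context; they are illustrative, not part of this API):
+//
+//	sets, err := client.ListVersions(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	for _, set := range sets {
+//		fmt.Println(set.Version, set.Platforms)
+//	}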
+func (c *Client) ListVersions(ctx context.Context) ([]versions.Set, error) { + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o"), + } + query := make(url.Values) + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for cont := true; cont; { + c.Log.V(1).Info("listing bucket to get versions", "bucket", c.Bucket) + + loc.RawQuery = query.Encode() + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to list bucket items: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to list bucket items: %w", err) + } + + err = func() error { + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("unable list bucket items -- got status %q from GCS", resp.Status) + } + + var list objectList + if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { + return fmt.Errorf("unable unmarshal bucket items list: %w", err) + } + + // continue listing if needed + cont = list.NextPageToken != "" + query.Set("pageToken", list.NextPageToken) + + for _, item := range list.Items { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, item.Name) + if ver == nil { + c.Log.V(1).Info("skipping bucket object -- does not appear to be a versioned tools object", "name", item.Name) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + MD5: item.Hash, + }) + } + + return nil + }() + if err != nil { + return nil, err + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. +func (c *Client) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + itemName := platform.ArchiveName(version) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + RawQuery: "alt=media", + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", itemName, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q from GCS", itemName, req.URL, resp.Status) + } + + if platform.MD5 != "" { + // stream in chunks to do the checksum, don't load the whole thing into + // memory to avoid causing issues with big files. 
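+ // Each iteration below reads at most 32 KiB into buf, feeds that chunk to
+ // both the MD5 hash and the output writer, and stops once a read returns
+ // no data or io.EOF, so the archive is never held in memory in full.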
+ buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + checksum := md5.New() //nolint:gosec + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable read next chunk of %s: %w", itemName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + checksum.Write(buf[:amt]) + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", itemName, err) + } + } + cont = amt > 0 && !errors.Is(err, io.EOF) + } + + sum := base64.StdEncoding.EncodeToString(checksum.Sum(nil)) + + if sum != platform.MD5 { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported from GCS)", itemName, sum, platform.MD5) + } + } else if _, err := io.Copy(out, resp.Body); err != nil { + return fmt.Errorf("unable to download %s: %w", itemName, err) + } + return nil +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. +func (c *Client) FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error { + itemName := pl.ArchiveName(ver) + loc := &url.URL{ + Scheme: c.scheme(), + Host: c.Server, + Path: path.Join("/storage/v1/b/", c.Bucket, "o", itemName), + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch metadata for %s: %w", itemName, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch metadata for %s: %w", itemName, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch metadata for %s -- got status %q from GCS", itemName, resp.Status) + } + + var item bucketObject + if err := json.NewDecoder(resp.Body).Decode(&item); err != nil { + return fmt.Errorf("unable to unmarshal metadata for %s: %w", itemName, err) + } + + pl.MD5 = item.Hash + return nil +} diff --git a/tools/setup-envtest/store/helpers.go b/tools/setup-envtest/store/helpers.go new file mode 100644 index 0000000000..30902187e9 --- /dev/null +++ b/tools/setup-envtest/store/helpers.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "errors" + "os" + "path/filepath" + "runtime" +) + +// DefaultStoreDir returns the default location for the store. +// It's dependent on operating system: +// +// - Windows: %LocalAppData%\kubebuilder-envtest +// - OSX: ~/Library/Application Support/io.kubebuilder.envtest +// - Others: ${XDG_DATA_HOME:-~/.local/share}/kubebuilder-envtest +// +// Otherwise, it errors out. Note that these paths must not be relied upon +// manually. 
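+//
+// setup-envtest uses this value as the default --bin-dir when no explicit
+// directory is passed on the command line.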
+func DefaultStoreDir() (string, error) { + var baseDir string + + // find the base data directory + switch runtime.GOOS { + case "windows": + baseDir = os.Getenv("LocalAppData") + if baseDir == "" { + return "", errors.New("%LocalAppData% is not defined") + } + case "darwin", "ios": + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("$HOME is not defined") + } + baseDir = filepath.Join(homeDir, "Library/Application Support") + default: + baseDir = os.Getenv("XDG_DATA_HOME") + if baseDir == "" { + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("neither $XDG_DATA_HOME nor $HOME are defined") + } + baseDir = filepath.Join(homeDir, ".local/share") + } + } + + // append our program-specific dir to it (OSX has a slightly different + // convention so try to follow that). + switch runtime.GOOS { + case "darwin", "ios": + return filepath.Join(baseDir, "io.kubebuilder.envtest"), nil + default: + return filepath.Join(baseDir, "kubebuilder-envtest"), nil + } +} diff --git a/tools/setup-envtest/store/store.go b/tools/setup-envtest/store/store.go new file mode 100644 index 0000000000..e6f258e4ac --- /dev/null +++ b/tools/setup-envtest/store/store.go @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "archive/tar" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/go-logr/logr" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// TODO(directxman12): error messages don't show full path, which is gonna make +// things hard to debug + +// Item is a version-platform pair. +type Item struct { + Version versions.Concrete + Platform versions.Platform +} + +// dirName returns the directory name in the store for this item. +func (i Item) dirName() string { + return i.Platform.BaseName(i.Version) +} +func (i Item) String() string { + return fmt.Sprintf("%s (%s)", i.Version, i.Platform) +} + +// Filter is a version spec & platform selector (i.e. platform +// potentially with wilcards) to filter store items. +type Filter struct { + Version versions.Spec + Platform versions.Platform +} + +// Matches checks if this filter matches the given item. +func (f Filter) Matches(item Item) bool { + return f.Version.Matches(item.Version) && f.Platform.Matches(item.Platform) +} + +// Store knows how to list, load, store, and delete envtest tools. +type Store struct { + // Root is the root FS that the store stores in. You'll probably + // want to use a BasePathFS to scope it down to a particular directory. + // + // Note that if for some reason there are nested BasePathFSes, and they're + // interrupted by a non-BasePathFS, Path won't work properly. + Root afero.Fs +} + +// NewAt creates a new store on disk at the given path. +func NewAt(path string) *Store { + return &Store{ + Root: afero.NewBasePathFs(afero.NewOsFs(), path), + } +} + +// Initialize ensures that the store is all set up on disk, etc. +func (s *Store) Initialize(ctx context.Context) error { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + log.V(1).Info("ensuring base binaries dir exists") + if err := s.unpackedBase().MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure base binaries dir exists: %w", err) + } + return nil +} + +// Has checks if an item exists in the store. 
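+// It only checks that the version-platform directory is present on disk;
+// the directory's contents are not validated.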
+func (s *Store) Has(item Item) (bool, error) {
+ path := s.unpackedPath(item.dirName())
+ _, err := path.Stat("")
+ if err != nil && !errors.Is(err, afero.ErrFileNotFound) {
+ return false, fmt.Errorf("unable to check if version-platform dir exists: %w", err)
+ }
+ return err == nil, nil
+}
+
+// List lists all items matching the given filter.
+//
+// Results are sorted by version (newest first), and OS/arch (consistently,
+// but no guaranteed ordering).
+func (s *Store) List(ctx context.Context, matching Filter) ([]Item, error) {
+ var res []Item
+ if err := s.eachItem(ctx, matching, func(_ string, item Item) {
+ res = append(res, item)
+ }); err != nil {
+ return nil, fmt.Errorf("unable to list version-platform pairs in store: %w", err)
+ }
+
+ sort.Slice(res, func(i, j int) bool {
+ if !res[i].Version.Matches(res[j].Version) {
+ return res[i].Version.NewerThan(res[j].Version)
+ }
+ return orderPlatforms(res[i].Platform, res[j].Platform)
+ })
+
+ return res, nil
+}
+
+// Add adds this item to the store, with the given contents (a .tar.gz file).
+func (s *Store) Add(ctx context.Context, item Item, contents io.Reader) (resErr error) {
+ log, err := logr.FromContext(ctx)
+ if err != nil {
+ return err
+ }
+
+ itemName := item.dirName()
+ log = log.WithValues("version-platform", itemName)
+ itemPath := s.unpackedPath(itemName)
+
+ // make sure to clean up if we hit an error
+ defer func() {
+ if resErr != nil {
+ // intentionally best-effort -- we can't do anything beyond logging a failed cleanup
+ err := s.removeItem(itemPath)
+ if err != nil {
+ log.Error(err, "unable to clean up partially added version-platform pair after error")
+ }
+ }
+ }()
+
+ log.V(1).Info("ensuring version-platform binaries dir exists and is empty & writable")
+ _, err = itemPath.Stat("")
+ if err != nil && !errors.Is(err, afero.ErrFileNotFound) {
+ return fmt.Errorf("unable to ensure version-platform binaries dir %s exists", itemName)
+ }
+ if err == nil { // exists
+ log.V(1).Info("cleaning up old version-platform binaries dir")
+ if err := s.removeItem(itemPath); err != nil {
+ return fmt.Errorf("unable to clean up existing version-platform binaries dir %s", itemName)
+ }
+ }
+ if err := itemPath.MkdirAll("", 0755); err != nil {
+ return fmt.Errorf("unable to make sure entry dir %s exists", itemName)
+ }
+
+ log.V(1).Info("extracting archive")
+ gzStream, err := gzip.NewReader(contents)
+ if err != nil {
+ return fmt.Errorf("unable to start un-gz-ing entry archive")
+ }
+ tarReader := tar.NewReader(gzStream)
+
+ var header *tar.Header
+ for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() {
+ if header.Typeflag != tar.TypeReg { // TODO(directxman12): support symlinks, etc?
+ log.V(1).Info("skipping non-regular-file entry in archive", "entry", header.Name)
+ continue
+ }
+ // just dump all files to the main path, ignoring the prefixed directory
+ // paths -- they're redundant. We also ignore bits for the most part (except for X),
+ // preferring our own scheme.
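+ // The mode below masks the archive's permission bits down to at most
+ // r-x (0555), so unpacked binaries are never left writable.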
+ targetPath := filepath.Base(header.Name) + log.V(1).Info("writing archive file to disk", "archive file", header.Name, "on-disk file", targetPath) + perms := 0555 & header.Mode // make sure we're at most r+x + binOut, err := itemPath.OpenFile(targetPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(perms)) + if err != nil { + return fmt.Errorf("unable to create file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + if err := func() error { // IIFE to get the defer properly in a loop + defer binOut.Close() + if _, err := io.Copy(binOut, tarReader); err != nil { //nolint:gosec + return fmt.Errorf("unable to write file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + return nil + }(); err != nil { + return err + } + } + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable to finish un-tar-ing the downloaded archive: %w", err) + } + log.V(1).Info("unpacked archive") + + log.V(1).Info("switching version-platform directory to read-only") + if err := itemPath.Chmod("", 0555); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to make version-platform directory read-only") + } + return nil +} + +// Remove removes all items matching the given filter. +// +// It returns a list of the successfully removed items (even in the case +// of an error). +func (s *Store) Remove(ctx context.Context, matching Filter) ([]Item, error) { + log, err := logr.FromContext(ctx) + if err != nil { + return nil, err + } + + var removed []Item + var savedErr error + if err := s.eachItem(ctx, matching, func(name string, item Item) { + log.V(1).Info("Removing version-platform pair at path", "version-platform", item, "path", name) + + if err := s.removeItem(s.unpackedPath(name)); err != nil { + log.Error(err, "unable to make existing version-platform dir writable to clean it up", "path", name) + savedErr = fmt.Errorf("unable to remove version-platform pair %s (dir %s): %w", item, name, err) + return // don't mark this as removed in the report + } + removed = append(removed, item) + }); err != nil { + return removed, fmt.Errorf("unable to list version-platform pairs to figure out what to delete: %w", err) + } + if savedErr != nil { + return removed, savedErr + } + return removed, nil +} + +// Path returns an actual path that case be used to access this item. +func (s *Store) Path(item Item) (string, error) { + path := s.unpackedPath(item.dirName()) + // NB(directxman12): we need root's realpath because RealPath only + // looks at its own path, and so thus doesn't prepend the underlying + // root's base path. + // + // Technically, if we're fed something that's double wrapped as root, + // this'll be wrong, but this is basically as much as we can do + return afero.FullBaseFsPath(path.(*afero.BasePathFs), ""), nil +} + +// unpackedBase returns the directory in which item dirs lives. +func (s *Store) unpackedBase() afero.Fs { + return afero.NewBasePathFs(s.Root, "k8s") +} + +// unpackedPath returns the item dir with this name. +func (s *Store) unpackedPath(name string) afero.Fs { + return afero.NewBasePathFs(s.unpackedBase(), name) +} + +// eachItem iterates through the on-disk versions that match our version & platform selector, +// calling the callback for each. 
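+//
+// For example, an on-disk entry named "1.16.3-linux-amd64" becomes
+// Item{Version: 1.16.3, Platform: linux/amd64}; entries whose names do not
+// parse as a version-platform pair are skipped.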
+func (s *Store) eachItem(ctx context.Context, filter Filter, cb func(name string, item Item)) error { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + entries, err := afero.ReadDir(s.unpackedBase(), "") + if err != nil { + return fmt.Errorf("unable to list folders in store's unpacked directory: %w", err) + } + + for _, entry := range entries { + if !entry.IsDir() { + log.V(1).Info("skipping dir entry, not a folder", "entry", entry.Name()) + continue + } + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, entry.Name()) + if ver == nil { + log.V(1).Info("skipping dir entry, not a version", "entry", entry.Name()) + continue + } + item := Item{Version: *ver, Platform: pl} + + if !filter.Matches(item) { + log.V(1).Info("skipping on disk version, does not match version and platform selectors", "platform", pl, "version", ver, "entry", entry.Name()) + continue + } + + cb(entry.Name(), item) + } + + return nil +} + +// removeItem removes the given item directory from disk. +func (s *Store) removeItem(itemDir afero.Fs) error { + if err := itemDir.Chmod("", 0755); err != nil { + // no point in trying to remove if we can't fix the permissions, bail here + return fmt.Errorf("unable to make version-platform dir writable: %w", err) + } + if err := itemDir.RemoveAll(""); err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to remove version-platform dir: %w", err) + } + return nil +} + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} diff --git a/tools/setup-envtest/store/store_suite_test.go b/tools/setup-envtest/store/store_suite_test.go new file mode 100644 index 0000000000..2eb909af6b --- /dev/null +++ b/tools/setup-envtest/store/store_suite_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func logCtx() context.Context { + return logr.NewContext(context.Background(), testLog) +} + +func TestStore(t *testing.T) { + testLog = zapLogger() + RegisterFailHandler(Fail) + RunSpecs(t, "Store Suite") +} diff --git a/tools/setup-envtest/store/store_test.go b/tools/setup-envtest/store/store_test.go new file mode 100644 index 0000000000..862996abfa --- /dev/null +++ b/tools/setup-envtest/store/store_test.go @@ -0,0 +1,250 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "io" + "io/fs" + "math/rand" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +const ( + fakeStorePath = "/path/to/the/store" +) + +var _ = Describe("Store", func() { + var st *store.Store + BeforeEach(func() { + fs := afero.NewMemMapFs() + fakeStoreFiles(fs, fakeStorePath) + st = &store.Store{ + Root: afero.NewBasePathFs(fs, fakeStorePath), + } + }) + Describe("initialization", func() { + It("should ensure the repo root exists", func() { + // remove the old dir + Expect(st.Root.RemoveAll("")).To(Succeed(), "should be able to remove the store before trying to initialize") + + Expect(st.Initialize(logCtx())).To(Succeed(), "initialization should succeed") + Expect(st.Root.Stat("k8s")).NotTo(BeNil(), "store's binary dir should exist") + }) + + It("should be fine if the repo root already exists", func() { + Expect(st.Initialize(logCtx())).To(Succeed()) + }) + }) + Describe("listing items", func() { + It("should filter results by the given filter, sorted in version order (newest first)", func() { + sel, err := versions.FromExpr("<=1.16") + Expect(err).NotTo(HaveOccurred(), "should be able to construct <=1.16 selector") + Expect(st.List(logCtx(), store.Filter{ + Version: sel, + Platform: versions.Platform{OS: "*", Arch: "amd64"}, + })).To(Equal([]store.Item{ + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + {Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + })) + }) + It("should skip non-folders in the store", func() { + Expect(afero.WriteFile(st.Root, "k8s/2.3.6-linux-amd128", []byte{0x01}, fs.ModePerm)).To(Succeed(), "should be able 
to create a non-store file in the store directory") + Expect(st.List(logCtx(), store.Filter{ + Version: versions.AnyVersion, Platform: versions.Platform{OS: "linux", Arch: "amd128"}, + })).To(BeEmpty()) + }) + + It("should skip non-matching names in the store", func() { + Expect(st.Root.Mkdir("k8s/somedir-2.3.6-linux-amd128", fs.ModePerm)).To(Succeed(), "should be able to create a non-store file in the store directory") + Expect(st.List(logCtx(), store.Filter{ + Version: versions.AnyVersion, Platform: versions.Platform{OS: "linux", Arch: "amd128"}, + })).To(BeEmpty()) + }) + }) + + Describe("removing items", func() { + var res []store.Item + BeforeEach(func() { + sel, err := versions.FromExpr("<=1.16") + Expect(err).NotTo(HaveOccurred(), "should be able to construct <=1.16 selector") + res, err = st.Remove(logCtx(), store.Filter{ + Version: sel, + Platform: versions.Platform{OS: "*", Arch: "amd64"}, + }) + Expect(err).NotTo(HaveOccurred(), "should be able to remove <=1.16 & */amd64") + }) + It("should return all items removed", func() { + Expect(res).To(ConsistOf( + store.Item{Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + store.Item{Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + store.Item{Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + store.Item{Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + )) + }) + It("should remove all items matching the given filter from disk", func() { + Expect(afero.ReadDir(st.Root, "k8s")).NotTo(ContainElements( + WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.1-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.0-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-linux-amd64")), + )) + }) + + It("should leave items that don't match in place", func() { + Expect(afero.ReadDir(st.Root, "k8s")).To(ContainElements( + WithTransform(fs.FileInfo.Name, Equal("1.17.9-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.2-linux-yourimagination")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) + }) + + Describe("adding items", func() { + It("should support .tar.gz input", func() { + Expect(st.Add(logCtx(), newItem, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Has(newItem)).To(BeTrue(), "should have the item after adding it") + }) + + It("should extract binaries from the given archive to a directly to the item's directory, regardless of path", func() { + Expect(st.Add(logCtx(), newItem, makeFakeArchive(newName))).To(Succeed()) + + dirName := newItem.Platform.BaseName(newItem.Version) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "some-file"))).To(HavePrefix(newName + "some-file")) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "other-file"))).To(HavePrefix(newName + "other-file")) + }) + + It("should clean up any existing item directory before creating the new one", func() { + item := localVersions[0] + Expect(st.Add(logCtx(), item, makeFakeArchive(newName))).To(Succeed()) + Expect(st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version)))).NotTo(BeNil(), "new files should exist") + }) + It("should clean up if it errors before finishing", func() { + item := localVersions[0] + Expect(st.Add(logCtx(), item, new(bytes.Buffer))).NotTo(Succeed(), "should fail to extract") + _, err := 
st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version))) + Expect(err).To(HaveOccurred(), "the binaries dir for the item should be gone") + + }) + }) + + Describe("checking if items are present", func() { + It("should report that present directories are present", func() { + Expect(st.Has(localVersions[0])).To(BeTrue()) + }) + + It("should report that absent directories are absent", func() { + Expect(st.Has(newItem)).To(BeFalse()) + }) + }) + + Describe("getting the path", func() { + It("should return the absolute on-disk path of the given item", func() { + item := localVersions[0] + Expect(st.Path(item)).To(Equal(filepath.Join(fakeStorePath, "k8s", item.Platform.BaseName(item.Version)))) + }) + }) +}) + +var ( + // keep this sorted. + localVersions = []store.Item{ + {Version: ver(1, 17, 9), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "linux", Arch: "yourimagination"}}, + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + {Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "hyperwarp", Arch: "pixiedust"}}, + } + + newItem = store.Item{ + Version: ver(1, 16, 3), + Platform: versions.Platform{OS: "linux", Arch: "amd64"}, + } + + newName = "kubebuilder-tools-1.16.3-linux-amd64.tar.gz" +) + +func ver(major, minor, patch int) versions.Concrete { + return versions.Concrete{ + Major: major, + Minor: minor, + Patch: patch, + } +} + +func makeFakeArchive(magic string) io.Reader { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + Expect(tarWriter.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: "kubebuilder/bin/", // so we can ensure we skip non-files + Mode: 0777, + })).To(Succeed()) + for _, fileName := range []string{"some-file", "other-file"} { + // create fake file contents: magic+fileName+randomBytes() + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], magic) + copy(chunk[len(magic):], fileName) + start := len(magic) + len(fileName) + if _, err := rand.Read(chunk[start:]); err != nil { //nolint:gosec + panic(err) + } + + // write to kubebuilder/bin/fileName + err := tarWriter.WriteHeader(&tar.Header{ + Name: "kubebuilder/bin/" + fileName, + Size: int64(len(chunk[:])), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(chunk[:]) + if err != nil { + panic(err) + } + } + tarWriter.Close() + gzipWriter.Close() + + return out +} + +func fakeStoreFiles(fs afero.Fs, dir string) { + By("making the unpacked directory") + unpackedBase := filepath.Join(dir, "k8s") + Expect(fs.Mkdir(unpackedBase, 0755)).To(Succeed()) + + By("making some fake (empty) versions") + for _, item := range localVersions { + Expect(fs.Mkdir(filepath.Join(unpackedBase, item.Platform.BaseName(item.Version)), 0755)).To(Succeed()) + } +} diff --git a/tools/setup-envtest/versions/misc_test.go b/tools/setup-envtest/versions/misc_test.go new file mode 100644 index 0000000000..3429211f27 --- /dev/null +++ b/tools/setup-envtest/versions/misc_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Concrete", func() { + It("should match the only same version", func() { + ver16 := Concrete{Major: 1, Minor: 16} + ver17 := Concrete{Major: 1, Minor: 17} + Expect(ver16.Matches(ver16)).To(BeTrue(), "should match the same version") + Expect(ver16.Matches(ver17)).To(BeFalse(), "should not match a different version") + }) + It("should serialize as X.Y.Z", func() { + Expect(Concrete{Major: 1, Minor: 16, Patch: 3}.String()).To(Equal("1.16.3")) + }) + Describe("when ordering relative to other versions", func() { + ver1163 := Concrete{Major: 1, Minor: 16, Patch: 3} + Specify("newer patch should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 1, Minor: 16})).To(BeTrue()) + }) + Specify("newer minor should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 1, Minor: 15, Patch: 3})).To(BeTrue()) + }) + Specify("newer major should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 0, Minor: 16, Patch: 3})).To(BeTrue()) + }) + }) +}) + +var _ = Describe("Platform", func() { + Specify("a concrete platform should match exactly itself", func() { + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "linux", Arch: "s390x"} + plat3 := Platform{OS: "windows", Arch: "amd64"} + Expect(plat1.Matches(plat1)).To(BeTrue(), "should match itself") + Expect(plat1.Matches(plat2)).To(BeFalse(), "should reject a different arch") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different os") + }) + Specify("a wildcard arch should match any arch", func() { + sel := Platform{OS: "linux", Arch: "*"} + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "linux", Arch: "s390x"} + plat3 := Platform{OS: "windows", Arch: "amd64"} + Expect(sel.Matches(sel)).To(BeTrue(), "should match itself") + Expect(sel.Matches(plat1)).To(BeTrue(), "should match some arch with the same OS") + Expect(sel.Matches(plat2)).To(BeTrue(), "should match another arch with the same OS") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different os") + }) + Specify("a wildcard os should match any os", func() { + sel := Platform{OS: "*", Arch: "amd64"} + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "windows", Arch: "amd64"} + plat3 := Platform{OS: "linux", Arch: "s390x"} + Expect(sel.Matches(sel)).To(BeTrue(), "should match itself") + Expect(sel.Matches(plat1)).To(BeTrue(), "should match some os with the same arch") + Expect(sel.Matches(plat2)).To(BeTrue(), "should match another os with the same arch") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different arch") + }) + It("should report a wildcard OS as a wildcard platform", func() { + Expect(Platform{OS: "*", Arch: "amd64"}.IsWildcard()).To(BeTrue()) + }) + It("should report a wildcard arch as a wildcard platform", func() { + Expect(Platform{OS: 
"linux", Arch: "*"}.IsWildcard()).To(BeTrue()) + }) + It("should serialize as os/arch", func() { + Expect(Platform{OS: "linux", Arch: "amd64"}.String()).To(Equal("linux/amd64")) + }) + + Specify("knows how to produce a base store name", func() { + plat := Platform{OS: "linux", Arch: "amd64"} + ver := Concrete{Major: 1, Minor: 16, Patch: 3} + Expect(plat.BaseName(ver)).To(Equal("1.16.3-linux-amd64")) + }) + + Specify("knows how to produce an archive name", func() { + plat := Platform{OS: "linux", Arch: "amd64"} + ver := Concrete{Major: 1, Minor: 16, Patch: 3} + Expect(plat.ArchiveName(ver)).To(Equal("kubebuilder-tools-1.16.3-linux-amd64.tar.gz")) + }) + + Describe("parsing", func() { + Context("for version-platform names", func() { + It("should accept strings of the form x.y.z-os-arch", func() { + ver, plat := ExtractWithPlatform(VersionPlatformRE, "1.16.3-linux-amd64") + Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"})) + }) + It("should reject nonsense strings", func() { + ver, _ := ExtractWithPlatform(VersionPlatformRE, "1.16-linux-amd64") + Expect(ver).To(BeNil()) + }) + }) + Context("for archive names", func() { + It("should accept strings of the form kubebuilder-tools-x.y.z-os-arch.tar.gz", func() { + ver, plat := ExtractWithPlatform(ArchiveRE, "kubebuilder-tools-1.16.3-linux-amd64.tar.gz") + Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"})) + }) + It("should reject nonsense strings", func() { + ver, _ := ExtractWithPlatform(ArchiveRE, "kubebuilder-tools-1.16.3-linux-amd64.tar.sum") + Expect(ver).To(BeNil()) + }) + }) + }) +}) + +var _ = Describe("Spec helpers", func() { + Specify("can fill a spec with a concrete version", func() { + spec := Spec{Selector: AnySelector{}} // don't just use AnyVersion so we don't modify it + spec.MakeConcrete(Concrete{Major: 1, Minor: 16}) + Expect(spec.AsConcrete()).To(Equal(&Concrete{Major: 1, Minor: 16})) + }) + It("should serialize as the underlying selector with ! for check latest", func() { + spec, err := FromExpr("1.16.*!") + Expect(err).NotTo(HaveOccurred()) + Expect(spec.String()).To(Equal("1.16.*!")) + }) + It("should serialize as the underlying selector by itself if not check latest", func() { + spec, err := FromExpr("1.16.*") + Expect(err).NotTo(HaveOccurred()) + Expect(spec.String()).To(Equal("1.16.*")) + }) +}) diff --git a/tools/setup-envtest/versions/parse.go b/tools/setup-envtest/versions/parse.go new file mode 100644 index 0000000000..c053bf8757 --- /dev/null +++ b/tools/setup-envtest/versions/parse.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package versions + +import ( + "fmt" + "regexp" + "strconv" +) + +var ( + // baseVersionRE is a semver-ish version -- either X.Y.Z, X.Y, or X.Y.{*|x}. + baseVersionRE = `(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:\.(?P0|[1-9]\d*|x|\*))?` + // versionExprRe matches valid version input for FromExpr. + versionExprRE = regexp.MustCompile(`^(?P<|~|<=)?` + baseVersionRE + `(?P!)?$`) + + // ConcreteVersionRE matches a concrete version anywhere in the string. + ConcreteVersionRE = regexp.MustCompile(`(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)`) + // OnlyConcreteVersionRE matches a string that's just a concrete version. 
+ OnlyConcreteVersionRE = regexp.MustCompile(`^` + ConcreteVersionRE.String() + `$`) +) + +// FromExpr extracts a version from a string in the form of a semver version, +// where X, Y, and Z may also be wildcards ('*', 'x'), +// and pre-release names & numbers may also be wildcards. The prerelease section is slightly +// restricted to match what k8s does. +// The whole string is a version selector as follows: +// - X.Y.Z matches version X.Y.Z where x, y, and z are +// are ints >= 0, and Z may be '*' or 'x' +// - X.Y is equivalent to X.Y.* +// - ~X.Y.Z means >= X.Y.Z && < X.Y+1.0 +// - = comparisons, if we use + // wildcards with a selector we can just set them to zero. + if verInfo.Patch == AnyPoint { + verInfo.Patch = PointVersion(0) + } + baseVer := *verInfo.AsConcrete() + spec.Selector = TildeSelector{Concrete: baseVer} + default: + panic("unreachable: mismatch between FromExpr and its RE in selector") + } + + return spec, nil +} + +// PointVersionFromValidString extracts a point version +// from the corresponding string representation, which may +// be a number >= 0, or x|* (AnyPoint). +// +// Anything else will cause a panic (use this on strings +// extracted from regexes). +func PointVersionFromValidString(str string) PointVersion { + switch str { + case "*", "x": + return AnyPoint + default: + ver, err := strconv.Atoi(str) + if err != nil { + panic(err) + } + return PointVersion(ver) + } +} + +// PatchSelectorFromMatch constructs a simple selector according to the +// ParseExpr rules out of pre-validated sections. +// +// re must include name captures for major, minor, patch, prenum, and prelabel +// +// Any bad input may cause a panic. Use with when you got the parts from an RE match. +func PatchSelectorFromMatch(match []string, re *regexp.Regexp) PatchSelector { + // already parsed via RE, should be fine to ignore errors unless it's a + // *huge* number + major, err := strconv.Atoi(match[re.SubexpIndex("major")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + minor, err := strconv.Atoi(match[re.SubexpIndex("minor")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + + // patch is optional, means wilcard if left off + patch := AnyPoint + if patchRaw := match[re.SubexpIndex("patch")]; patchRaw != "" { + patch = PointVersionFromValidString(patchRaw) + } + return PatchSelector{ + Major: major, + Minor: minor, + Patch: patch, + } +} diff --git a/tools/setup-envtest/versions/parse_test.go b/tools/setup-envtest/versions/parse_test.go new file mode 100644 index 0000000000..81b8276b90 --- /dev/null +++ b/tools/setup-envtest/versions/parse_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func patchSel(x, y int, z PointVersion) PatchSelector { + return PatchSelector{Major: x, Minor: y, Patch: z} +} + +func patchSpec(x, y int, z PointVersion) Spec { + return Spec{Selector: patchSel(x, y, z)} +} + +func tildeSel(x, y, z int) TildeSelector { + return TildeSelector{ + Concrete: Concrete{ + Major: x, Minor: y, Patch: z, + }, + } +} + +func tildeSpec(x, y, z int) Spec { + return Spec{Selector: tildeSel(x, y, z)} +} +func ltSpec(x, y int, z PointVersion) Spec { + // this just keeps the table a bit shorter + return Spec{Selector: LessThanSelector{ + PatchSelector: patchSel(x, y, z), + }} +} +func lteSpec(x, y int, z PointVersion) Spec { + // this just keeps the table a bit shorter + return Spec{Selector: LessThanSelector{ + PatchSelector: patchSel(x, y, z), + OrEquals: true, + }} +} + +var _ = Describe("Parse", func() { + DescribeTable("it should support", + func(spec string, expected Spec) { + Expect(FromExpr(spec)).To(Equal(expected)) + }, + Entry("X.Y versions", "1.16", patchSpec(1, 16, AnyPoint)), + Entry("X.Y.Z versions", "1.16.3", patchSpec(1, 16, PointVersion(3))), + Entry("X.Y.x wildcard", "1.16.x", patchSpec(1, 16, AnyPoint)), + Entry("X.Y.* wildcard", "1.16.*", patchSpec(1, 16, AnyPoint)), + + Entry("~X.Y selector", "~1.16", tildeSpec(1, 16, 0)), + Entry("~X.Y.Z selector", "~1.16.3", tildeSpec(1, 16, 3)), + Entry("~X.Y.x selector", "~1.16.x", tildeSpec(1, 16, 0)), + Entry("~X.Y.* selector", "~1.16.*", tildeSpec(1, 16, 0)), + + Entry("\w+)-(?P\w+)` + // VersionPlatformRE matches concrete version-platform strings. + VersionPlatformRE = regexp.MustCompile(`^` + versionPlatformREBase + `$`) + // ArchiveRE matches concrete version-platform.tar.gz strings. + ArchiveRE = regexp.MustCompile(`^kubebuilder-tools-` + versionPlatformREBase + `\.tar\.gz$`) +) diff --git a/tools/setup-envtest/versions/selectors_test.go b/tools/setup-envtest/versions/selectors_test.go new file mode 100644 index 0000000000..340c825031 --- /dev/null +++ b/tools/setup-envtest/versions/selectors_test.go @@ -0,0 +1,216 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Selectors", func() { + Describe("patch", func() { + var sel Selector + Context("with any patch", func() { + BeforeEach(func() { + var err error + sel, err = FromExpr("1.16.*") + Expect(err).NotTo(HaveOccurred()) + }) + + It("should match any patch version with the same major & minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 0})).To(BeTrue(), "should match 1.16.0") + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + }) + + It("should serialize as X.Y.*", func() { + Expect(sel.String()).To(Equal("1.16.*")) + }) + + It("should not be concrete", func() { + Expect(sel.AsConcrete()).To(BeNil()) + }) + }) + + Context("with a specific patch", func() { + BeforeEach(func() { + var err error + sel, err = FromExpr("1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should match exactly the major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should reject a different patch", func() { + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeFalse(), "should reject 1.16.4") + }) + It("should serialize as X.Y.Z", func() { + Expect(sel.String()).To(Equal("1.16.3")) + }) + It("may be concrete", func() { + Expect(sel.AsConcrete()).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + }) + }) + + }) + + Describe("tilde", func() { + var sel Selector + BeforeEach(func() { + var err error + sel, err = FromExpr("~1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should match exactly the major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + }) + + It("should match a patch greater than the given one, with the same major/minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeTrue(), "should match 1.16.4") + }) + + It("should reject a patch less than the given one, with the same major/minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 2})).To(BeFalse(), "should reject 1.16.2") + + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should treat ~X.Y.* as ~X.Y.Z", func() { + sel, err := FromExpr("~1.16.*") + Expect(err).NotTo(HaveOccurred()) + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 0})).To(BeTrue(), "should match 1.16.0") + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 0})).To(BeFalse(), "should reject 
1.17.0") + }) + It("should serialize as ~X.Y.Z", func() { + Expect(sel.String()).To(Equal("~1.16.3")) + }) + It("should never be concrete", func() { + Expect(sel.AsConcrete()).To(BeNil()) + }) + }) + + Describe("less-than", func() { + var sel Selector + BeforeEach(func() { + var err error + sel, err = FromExpr("<1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should reject the exact major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 1.16.3") + + }) + It("should reject greater patches", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeFalse(), "should reject 1.16.4") + + }) + It("should reject greater majors", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + It("should reject greater minors", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should accept lesser patches", func() { + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 2})).To(BeTrue(), "should accept 1.16.2") + }) + It("should accept lesser majors", func() { + Expect(sel.Matches(Concrete{Major: 0, Minor: 16, Patch: 3})).To(BeTrue(), "should accept 0.16.3") + + }) + It("should accept lesser minors", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 15, Patch: 3})).To(BeTrue(), "should accept 1.15.3") + + }) + It("should serialize as other.Major + } + if c.Minor != other.Minor { + return c.Minor > other.Minor + } + return c.Patch > other.Patch +} + +// Matches checks if this version is equal to the other one. +func (c Concrete) Matches(other Concrete) bool { + return c == other +} + +func (c Concrete) String() string { + return fmt.Sprintf("%d.%d.%d", c.Major, c.Minor, c.Patch) +} + +// PatchSelector selects a set of versions where the patch is a wildcard. +type PatchSelector struct { + Major, Minor int + Patch PointVersion +} + +func (s PatchSelector) String() string { + return fmt.Sprintf("%d.%d.%s", s.Major, s.Minor, s.Patch) +} + +// Matches checks if the given version matches this selector. +func (s PatchSelector) Matches(ver Concrete) bool { + return s.Major == ver.Major && s.Minor == ver.Minor && s.Patch.Matches(ver.Patch) +} + +// AsConcrete returns nil if there are wildcards in this selector, +// and the concrete version that this selects otherwise. +func (s PatchSelector) AsConcrete() *Concrete { + if s.Patch == AnyPoint { + return nil + } + + return &Concrete{ + Major: s.Major, + Minor: s.Minor, + Patch: int(s.Patch), // safe to cast, we've just checked wilcards above + } +} + +// TildeSelector selects [X.Y.Z, X.Y+1.0). +type TildeSelector struct { + Concrete +} + +// Matches checks if the given version matches this selector. +func (s TildeSelector) Matches(ver Concrete) bool { + if s.Concrete.Matches(ver) { + // easy, "exact" match + return true + } + return ver.Major == s.Major && ver.Minor == s.Minor && ver.Patch >= s.Patch +} +func (s TildeSelector) String() string { + return "~" + s.Concrete.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s TildeSelector) AsConcrete() *Concrete { + return nil +} + +// LessThanSelector selects versions older than the given one +// (mainly useful for cleaning up). +type LessThanSelector struct { + PatchSelector + OrEquals bool +} + +// Matches checks if the given version matches this selector. 
+func (s LessThanSelector) Matches(ver Concrete) bool { + if s.Major != ver.Major { + return s.Major > ver.Major + } + if s.Minor != ver.Minor { + return s.Minor > ver.Minor + } + if !s.Patch.Matches(ver.Patch) { + // matches rules out a wildcard, so it's fine to compare as normal numbers + return int(s.Patch) > ver.Patch + } + return s.OrEquals +} +func (s LessThanSelector) String() string { + if s.OrEquals { + return "<=" + s.PatchSelector.String() + } + return "<" + s.PatchSelector.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s LessThanSelector) AsConcrete() *Concrete { + return nil +} + +// AnySelector matches any version at all. +type AnySelector struct{} + +// Matches checks if the given version matches this selector. +func (AnySelector) Matches(_ Concrete) bool { return true } + +// AsConcrete returns nil (this is never a concrete version). +func (AnySelector) AsConcrete() *Concrete { return nil } +func (AnySelector) String() string { return "*" } + +// Selector selects some concrete version or range of versions. +type Selector interface { + // AsConcrete tries to return this selector as a concrete version. + // If the selector would only match a single version, it'll return + // that, otherwise it'll return nil. + AsConcrete() *Concrete + // Matches checks if this selector matches the given concrete version. + Matches(ver Concrete) bool + String() string +} + +// Spec matches some version or range of versions, and tells us how to deal with local and +// remote when selecting a version. +type Spec struct { + Selector + + // CheckLatest tells us to check the remote server for the latest + // version that matches our selector, instead of just relying on + // matching local versions. + CheckLatest bool +} + +// MakeConcrete replaces the contents of this spec with one that +// matches the given concrete version (without checking latest +// from the server). +func (s *Spec) MakeConcrete(ver Concrete) { + s.Selector = ver + s.CheckLatest = false +} + +// AsConcrete returns the underlying selector as a concrete version, if +// possible. +func (s Spec) AsConcrete() *Concrete { + return s.Selector.AsConcrete() +} + +// Matches checks if the underlying selector matches the given version. +func (s Spec) Matches(ver Concrete) bool { + return s.Selector.Matches(ver) +} + +func (s Spec) String() string { + res := s.Selector.String() + if s.CheckLatest { + res += "!" + } + return res +} + +// PointVersion represents a wildcard (patch) version +// or concrete number. +type PointVersion int + +const ( + // AnyPoint matches any point version. + AnyPoint PointVersion = -1 +) + +// Matches checks if a point version is compatible +// with a concrete point version. +// Two point versions are compatible if they are +// a) both concrete +// b) one is a wildcard. +func (p PointVersion) Matches(other int) bool { + switch p { + case AnyPoint: + return true + default: + return int(p) == other + } +} +func (p PointVersion) String() string { + switch p { + case AnyPoint: + return "*" + default: + return strconv.Itoa(int(p)) + } +} + +var ( + // LatestVersion matches the most recent version on the remote server. + LatestVersion = Spec{ + Selector: AnySelector{}, + CheckLatest: true, + } + // AnyVersion matches any local or remote version. 
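+ // Unlike LatestVersion it does not set CheckLatest, so an
+ // already-installed local version can satisfy it without a remote check.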
+ AnyVersion = Spec{ + Selector: AnySelector{}, + } +) diff --git a/tools/setup-envtest/versions/versions_suite_test.go b/tools/setup-envtest/versions/versions_suite_test.go new file mode 100644 index 0000000000..e63c188596 --- /dev/null +++ b/tools/setup-envtest/versions/versions_suite_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestVersions(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Versions Suite") +} diff --git a/tools/setup-envtest/workflows/workflows.go b/tools/setup-envtest/workflows/workflows.go new file mode 100644 index 0000000000..5c41670a27 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows + +import ( + "context" + "io" + + "github.com/go-logr/logr" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" +) + +// Use is a workflow that prints out information about stored +// version-platform pairs, downloading them if necessary & requested. +type Use struct { + UseEnv bool + AssetsPath string + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Use) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("use")) + env.EnsureBaseDirs(ctx) + if f.UseEnv { + // the the env var unconditionally + if env.PathMatches(f.AssetsPath) { + env.PrintInfo(f.PrintFormat) + return + } + } + env.EnsureVersionIsSet(ctx) + if env.ExistsAndValid() { + env.PrintInfo(f.PrintFormat) + return + } + if env.NoDownload { + envp.Exit(2, "no such version (%s) exists on disk for this architecture (%s) -- try running `list -i` to see what's on disk", env.Version, env.Platform) + } + env.Fetch(ctx) + env.PrintInfo(f.PrintFormat) +} + +// List is a workflow that lists version-platform pairs in the store +// and on the remote server that match the given filter. +type List struct{} + +// Do executes this workflow. +func (List) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("list")) + env.EnsureBaseDirs(ctx) + env.ListVersions(ctx) +} + +// Cleanup is a workflow that removes version-platform pairs from the store +// that match the given filter. +type Cleanup struct{} + +// Do executes this workflow. +func (Cleanup) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("cleanup")) + + env.NoDownload = true + env.ForceDownload = false + + env.EnsureBaseDirs(ctx) + env.Remove(ctx) +} + +// Sideload is a workflow that adds or replaces a version-platform pair in the +// store, using the given archive as the files. +type Sideload struct { + Input io.Reader + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. 
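+// It never downloads anything: the archive supplied via Input is unpacked
+// into the store for the configured version and platform, and the result
+// is reported via PrintInfo in the chosen PrintFormat.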
+func (f Sideload) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("sideload")) + + env.EnsureBaseDirs(ctx) + env.NoDownload = true + env.Sideload(ctx, f.Input) + env.PrintInfo(f.PrintFormat) +} diff --git a/tools/setup-envtest/workflows/workflows_suite_test.go b/tools/setup-envtest/workflows/workflows_suite_test.go new file mode 100644 index 0000000000..f7e0e92a24 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_suite_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflows_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func TestWorkflows(t *testing.T) { + testLog = zapLogger() + RegisterFailHandler(Fail) + RunSpecs(t, "Workflows Suite") +} diff --git a/tools/setup-envtest/workflows/workflows_test.go b/tools/setup-envtest/workflows/workflows_test.go new file mode 100644 index 0000000000..352bfab8bb --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_test.go @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows_test + +import ( + "bytes" + "io/fs" + "path/filepath" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "github.com/spf13/afero" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + wf "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +func ver(major, minor, patch int) versions.Concrete { + return versions.Concrete{ + Major: major, + Minor: minor, + Patch: patch, + } +} + +func shouldHaveError() { + var err error + var code int + if cause := recover(); envp.CheckRecover(cause, func(caughtCode int, caughtErr error) { + err = caughtErr + code = caughtCode + }) { + panic(cause) + } + Expect(err).To(HaveOccurred(), "should write an error") + Expect(code).NotTo(BeZero(), "should exit with a non-zero code") +} + +const ( + testStorePath = ".teststore" +) + +var _ = Describe("Workflows", func() { + var ( + env *envp.Env + out *bytes.Buffer + server *ghttp.Server + remoteItems []item + ) + BeforeEach(func() { + out = new(bytes.Buffer) + baseFs := afero.Afero{Fs: afero.NewMemMapFs()} + env = &envp.Env{ + Log: testLog, + VerifySum: true, // on by default + FS: baseFs, + Store: &store.Store{Root: afero.NewBasePathFs(baseFs, testStorePath)}, + Out: out, + Platform: versions.PlatformItem{ // default + Platform: versions.Platform{ + OS: "linux", + Arch: "amd64", + }, + }, + Client: &remote.Client{ + Log: testLog.WithName("remote-client"), + Bucket: "kubebuilder-tools-test", // test custom bucket functionality too + Server: "localhost:-1", + Insecure: true, // no https in httptest :-( + }, + } + server = ghttp.NewServer() + env.Client.Server = server.Addr() + + fakeStore(env.FS, testStorePath) + remoteItems = remoteVersions + }) + JustBeforeEach(func() { + handleRemoteVersions(server, remoteItems) + }) + AfterEach(func() { + server.Close() + server = nil + }) + + Describe("use", func() { + var flow wf.Use + BeforeEach(func() { + // some defaults for most tests + env.Version = versions.Spec{ + Selector: ver(1, 16, 0), + } + flow = wf.Use{ + PrintFormat: envp.PrintPath, + } + }) + + It("should initialize the store if it doesn't exist", func() { + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + // need to set this to a valid remote version cause our store is now empty + env.Version = versions.Spec{Selector: ver(1, 16, 4)} + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + + Context("when use env is set", func() { + BeforeEach(func() { + flow.UseEnv = true + }) + It("should fall back to normal behavior when the env is not set", func() { + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should fall back to normal behavior if binaries are missing", func() { + flow.AssetsPath = ".teststore/missing-binaries" + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should use the value of the env if it contains the right binaries", func() { + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not try and check the version of the binaries", func() { + flow.AssetsPath = ".teststore/wrong-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not need to contact the network", func() { + server.Close() + flow.AssetsPath = ".teststore/good-version" + 
flow.Do(env) + // expect to not get a panic -- if we do, it'll cause the test to fail + }) + }) + + Context("when downloads are disabled", func() { + BeforeEach(func() { + env.NoDownload = true + server.Close() + }) + + // It("should not contact the network") is a gimme here, because we + // call server.Close() above. + + It("should error if no matches are found locally", func() { + defer shouldHaveError() + env.Version.Selector = versions.Concrete{Major: 9001} + flow.Do(env) + }) + It("should settle for the latest local match if latest is requested", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, + Minor: 16, + Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on "server" is 1.16.4, shouldn't use that + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + }) + + Context("if latest is requested", func() { + It("should contact the network to see if there's anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + }, + } + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.4-linux-amd64"), "should use the latest remote version") + }) + It("should still use the latest local if the network doesn't have anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 14, Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on the server is 1.14.1, latest local is 1.14.26 + Expect(out.String()).To(HaveSuffix("/1.14.26-linux-amd64"), "should use the latest local version") + }) + }) + + It("should check local for a match first", func() { + server.Close() // confirm no network + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(1, 16, 0)}, + } + flow.Do(env) + // latest on the server is 1.16.4, latest local is 1.16.1 + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + + It("should fall back to the network if no local matches are found", func() { + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(1, 19, 0)}, + } + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.19.2-linux-amd64"), "should have a remote version") + }) + + It("should error out if no matches can be found anywhere", func() { + defer shouldHaveError() + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(0, 0, 1)}, + } + flow.Do(env) + }) + + It("should skip local versions matches with non-matching platforms", func() { + env.NoDownload = true // so we get an error + defer shouldHaveError() + env.Version = versions.Spec{ + // has non-matching local versions + Selector: ver(1, 13, 0), + } + + flow.Do(env) + }) + + It("should skip remote version matches with non-matching platforms", func() { + defer shouldHaveError() + env.Version = versions.Spec{ + // has a non-matching remote version + Selector: versions.TildeSelector{Concrete: ver(1, 11, 1)}, + } + flow.Do(env) + }) + + Describe("verifying the checksum", func() { + BeforeEach(func() { + remoteItems = append(remoteItems, item{ + meta: bucketObject{ + Name: "kubebuilder-tools-86.75.309-linux-amd64.tar.gz", + Hash: "nottherightone!", + }, + contents: remoteItems[0].contents, // need a valid tar.gz file to not error from that + }) + env.Version = versions.Spec{ + Selector: ver(86, 75, 309), + } + }) + Specify("when enabled, should 
fail if the downloaded md5 checksum doesn't match", func() { + defer shouldHaveError() + flow.Do(env) + }) + Specify("when disabled, shouldn't check the checksum at all", func() { + env.VerifySum = false + flow.Do(env) + }) + }) + }) + + Describe("list", func() { + // split by fields so we're not matching on whitespace + listFields := func() [][]string { + resLines := strings.Split(strings.TrimSpace(out.String()), "\n") + resFields := make([][]string, len(resLines)) + for i, line := range resLines { + resFields[i] = strings.Fields(line) + } + return resFields + } + + Context("when downloads are disabled", func() { + BeforeEach(func() { + server.Close() // ensure no network + env.NoDownload = true + }) + It("should include local contents sorted by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + })) + }) + It("should skip non-matching local contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + })) + + }) + }) + Context("when downloads are enabled", func() { + Context("when sorting", func() { + BeforeEach(func() { + // shorten the list a bit for expediency + remoteItems = remoteItems[:7] + }) + It("should sort local & remote by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + {"(available)", "v1.11.1", "potato/cherrypie"}, + {"(available)", "v1.11.0", "darwin/amd64"}, + {"(available)", "v1.11.0", "linux/amd64"}, + {"(available)", "v1.10.1", "darwin/amd64"}, + {"(available)", "v1.10.1", "linux/amd64"}, + })) + + }) + }) + It("should skip non-matching remote contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(available)", "v1.16.4", "linux/amd64"}, + })) + + }) + }) + }) + + Describe("cleanup", func() { + BeforeEach(func() { + server.Close() // ensure no network + flow := wf.Cleanup{} + env.Version = versions.AnyVersion + env.Platform.Arch = "*" + flow.Do(env) + }) + + It("should remove matching versions from the store & keep non-matching ones", func() { + entries, err := env.FS.ReadDir(".teststore/k8s") + 
Expect(err).NotTo(HaveOccurred(), "should be able to read the store") + Expect(entries).To(ConsistOf( + WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) + }) + + Describe("sideload", func() { + var ( + flow wf.Sideload + // remote version fake contents are prefixed by the + // name for easier debugging, so we can use that here + expectedPrefix = remoteVersions[0].meta.Name + ) + BeforeEach(func() { + server.Close() // ensure no network + flow.Input = bytes.NewReader(remoteVersions[0].contents) + flow.PrintFormat = envp.PrintPath + }) + It("should initialize the store if it doesn't exist", func() { + env.Version.Selector = ver(1, 10, 0) + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + It("should fail if a non-concrete version is given", func() { + defer shouldHaveError() + env.Version = versions.LatestVersion + flow.Do(env) + }) + It("should fail if a non-concrete platform is given", func() { + defer shouldHaveError() + env.Version.Selector = ver(1, 10, 0) + env.Platform.Arch = "*" + flow.Do(env) + }) + It("should load the given gizipped tarball into our store as the given version", func() { + env.Version.Selector = ver(1, 10, 0) + flow.Do(env) + baseName := env.Platform.BaseName(*env.Version.AsConcrete()) + expectedPath := filepath.Join(".teststore/k8s", baseName, "some-file") + outContents, err := env.FS.ReadFile(expectedPath) + Expect(err).NotTo(HaveOccurred(), "should be able to load the unzipped file") + Expect(string(outContents)).To(HavePrefix(expectedPrefix), "should have the debugging prefix") + }) + }) +}) diff --git a/tools/setup-envtest/workflows/workflows_testutils_test.go b/tools/setup-envtest/workflows/workflows_testutils_test.go new file mode 100644 index 0000000000..c50b6f50ae --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_testutils_test.go @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" //nolint:gosec + "encoding/base64" + "math/rand" + "net/http" + "path/filepath" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var ( + remoteNames = []string{ + "kubebuilder-tools-1.10-darwin-amd64.tar.gz", + "kubebuilder-tools-1.10-linux-amd64.tar.gz", + "kubebuilder-tools-1.10.1-darwin-amd64.tar.gz", + "kubebuilder-tools-1.10.1-linux-amd64.tar.gz", + "kubebuilder-tools-1.11.0-darwin-amd64.tar.gz", + "kubebuilder-tools-1.11.0-linux-amd64.tar.gz", + "kubebuilder-tools-1.11.1-potato-cherrypie.tar.gz", + "kubebuilder-tools-1.12.3-darwin-amd64.tar.gz", + "kubebuilder-tools-1.12.3-linux-amd64.tar.gz", + "kubebuilder-tools-1.13.1-darwin-amd64.tar.gz", + "kubebuilder-tools-1.13.1-linux-amd64.tar.gz", + "kubebuilder-tools-1.14.1-darwin-amd64.tar.gz", + "kubebuilder-tools-1.14.1-linux-amd64.tar.gz", + "kubebuilder-tools-1.15.5-darwin-amd64.tar.gz", + "kubebuilder-tools-1.15.5-linux-amd64.tar.gz", + "kubebuilder-tools-1.16.4-darwin-amd64.tar.gz", + "kubebuilder-tools-1.16.4-linux-amd64.tar.gz", + "kubebuilder-tools-1.17.9-darwin-amd64.tar.gz", + "kubebuilder-tools-1.17.9-linux-amd64.tar.gz", + "kubebuilder-tools-1.19.0-darwin-amd64.tar.gz", + "kubebuilder-tools-1.19.0-linux-amd64.tar.gz", + "kubebuilder-tools-1.19.2-darwin-amd64.tar.gz", + "kubebuilder-tools-1.19.2-linux-amd64.tar.gz", + "kubebuilder-tools-1.19.2-linux-arm64.tar.gz", + "kubebuilder-tools-1.19.2-linux-ppc64le.tar.gz", + "kubebuilder-tools-1.20.2-darwin-amd64.tar.gz", + "kubebuilder-tools-1.20.2-linux-amd64.tar.gz", + "kubebuilder-tools-1.20.2-linux-arm64.tar.gz", + "kubebuilder-tools-1.20.2-linux-ppc64le.tar.gz", + "kubebuilder-tools-1.9-darwin-amd64.tar.gz", + "kubebuilder-tools-1.9-linux-amd64.tar.gz", + "kubebuilder-tools-v1.19.2-darwin-amd64.tar.gz", + "kubebuilder-tools-v1.19.2-linux-amd64.tar.gz", + "kubebuilder-tools-v1.19.2-linux-arm64.tar.gz", + "kubebuilder-tools-v1.19.2-linux-ppc64le.tar.gz", + } + + remoteVersions = makeContents(remoteNames) + + // keep this sorted. + localVersions = []versions.Set{ + {Version: ver(1, 17, 9), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 2), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "yourimagination"}}, + {Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 1), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 0), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 14, 26), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Platform: versions.Platform{OS: "hyperwarp", Arch: "pixiedust"}}, + }}, + } +) + +type item struct { + meta bucketObject + contents []byte +} + +// objectList is the parts we need of the GCS "list-objects-in-bucket" endpoint. +type objectList struct { + Items []bucketObject `json:"items"` +} + +// bucketObject is the parts we need of the GCS object metadata. 
+type bucketObject struct { + Name string `json:"name"` + Hash string `json:"md5Hash"` +} + +func makeContents(names []string) []item { + res := make([]item, len(names)) + for i, name := range names { + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], name) + if _, err := rand.Read(chunk[len(name):]); err != nil { //nolint:gosec + panic(err) + } + res[i] = verWith(name, chunk[:]) + } + return res +} + +func verWith(name string, contents []byte) item { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + err := tarWriter.WriteHeader(&tar.Header{ + Name: "kubebuilder/bin/some-file", + Size: int64(len(contents)), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(contents) + if err != nil { + panic(err) + } + tarWriter.Close() + gzipWriter.Close() + res := item{ + meta: bucketObject{Name: name}, + contents: out.Bytes(), + } + hash := md5.Sum(res.contents) //nolint:gosec + res.meta.Hash = base64.StdEncoding.EncodeToString(hash[:]) + return res +} + +func handleRemoteVersions(server *ghttp.Server, versions []item) { + list := objectList{Items: make([]bucketObject, len(versions))} + for i, ver := range versions { + ver := ver // copy to avoid capturing the iteration variable + list.Items[i] = ver.meta + server.RouteToHandler("GET", "/storage/v1/b/kubebuilder-tools-test/o/"+ver.meta.Name, func(resp http.ResponseWriter, req *http.Request) { + if req.URL.Query().Get("alt") == "media" { + resp.WriteHeader(http.StatusOK) + Expect(resp.Write(ver.contents)).To(Equal(len(ver.contents))) + } else { + ghttp.RespondWithJSONEncoded( + http.StatusOK, + ver.meta, + )(resp, req) + } + }) + } + server.RouteToHandler("GET", "/storage/v1/b/kubebuilder-tools-test/o", ghttp.RespondWithJSONEncoded( + http.StatusOK, + list, + )) +} + +func fakeStore(fs afero.Afero, dir string) { + By("making the unpacked directory") + unpackedBase := filepath.Join(dir, "k8s") + Expect(fs.Mkdir(unpackedBase, 0755)).To(Succeed()) + + By("making some fake (empty) versions") + for _, set := range localVersions { + for _, plat := range set.Platforms { + Expect(fs.Mkdir(filepath.Join(unpackedBase, plat.BaseName(set.Version)), 0755)).To(Succeed()) + } + } + + By("making some fake non-store paths") + Expect(fs.Mkdir(filepath.Join(dir, "missing-binaries"), 0755)) + + Expect(fs.Mkdir(filepath.Join(dir, "wrong-version"), 0755)) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "kube-apiserver"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "kubectl"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "etcd"), nil, 0755)).To(Succeed()) + + Expect(fs.Mkdir(filepath.Join(dir, "good-version"), 0755)) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "kube-apiserver"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "kubectl"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "etcd"), nil, 0755)).To(Succeed()) + // TODO: put the right files +}