diff --git a/.gitignore b/.gitignore index 3c6651d7b95..07612548271 100644 --- a/.gitignore +++ b/.gitignore @@ -82,3 +82,9 @@ tags # Finder Desktop Service .DS_Store + +# Jekyll / site specific +_site/ +.sass-cache/ +.jekyll-cache/ +.jekyll-metadata diff --git a/.travis.yml b/.travis.yml index 68081c9cf09..f7a137b47e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,20 +3,13 @@ sudo: required language: go -env: - global: - - MINIKUBE_WANTUPDATENOTIFICATION=false - - MINIKUBE_WANTREPORTERRORPROMPT=false - - MINIKUBE_HOME=$HOME - - CHANGE_MINIKUBE_NONE_USER=true - - KUBECONFIG=$HOME/.kube/config jobs: include: # YAML alias, for settings shared across the tests - &base-test stage: test go_import_path: github.com/openshift/odo - go: "1.13.1" + go: "1.12.x" install: - make goget-tools script: @@ -38,7 +31,7 @@ jobs: init: - git config --system core.longpaths true go_import_path: github.com/openshift/odo - go: "1.13.1" + go: "1.12.x" install: - systeminfo.exe | grep '^OS' - choco install make @@ -57,7 +50,7 @@ jobs: os: - osx go_import_path: github.com/openshift/odo - go: "1.13.1" + go: "1.12.x" install: - make goget-tools script: @@ -93,6 +86,7 @@ jobs: - travis_wait make test-cmd-url - travis_wait make test-cmd-devfile-url - travis_wait make test-cmd-debug + - travis_wait make test-cmd-devfile-debug - odo logout # Run service-catalog e2e tests @@ -135,7 +129,7 @@ jobs: - ./scripts/oc-cluster.sh - make bin - sudo cp odo /usr/bin - - travis_wait make test-cmd-docker-devfile-url + - travis_wait make test-cmd-docker-devfile-url-pushtarget # These tests need cluster login as they will be interacting with a Kube environment - odo login -u developer - travis_wait make test-cmd-devfile-catalog @@ -162,18 +156,26 @@ jobs: - <<: *base-test stage: test - name: "docker devfile push, url, catalog, delete command integration tests" + name: "docker devfile push, watch, catalog and delete command integration tests" script: - make bin - sudo cp odo /usr/bin - travis_wait make 
test-cmd-docker-devfile-push + - travis_wait make test-cmd-docker-devfile-watch - travis_wait make test-cmd-docker-devfile-catalog - travis_wait make test-cmd-docker-devfile-delete + - travis_wait make test-cmd-docker-devfile-url # Run devfile integration test on Kubernetes cluster - <<: *base-test stage: test - name: "devfile catalog and watch command integration tests on kubernetes cluster" + name: "devfile catalog, watch, push, debug, delete and create command integration tests on kubernetes cluster" + env: + - MINIKUBE_WANTUPDATENOTIFICATION=false + - MINIKUBE_WANTREPORTERRORPROMPT=false + - MINIKUBE_HOME=$HOME + - CHANGE_MINIKUBE_NONE_USER=true + - KUBECONFIG=$HOME/.kube/config before_script: # Download kubectl, a cli tool for accessing Kubernetes cluster - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.16.0/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/ @@ -183,6 +185,8 @@ jobs: - touch $KUBECONFIG - sudo minikube start --vm-driver=none --kubernetes-version=v1.16.0 - "sudo chown -R travis: /home/travis/.minikube/" + - sudo apt-get -qq update + - sudo apt-get install -y socat script: - kubectl cluster-info - make bin @@ -190,3 +194,7 @@ jobs: - export KUBERNETES=true - travis_wait make test-cmd-devfile-catalog - travis_wait make test-cmd-devfile-watch + - travis_wait make test-cmd-devfile-push + - travis_wait make test-cmd-devfile-debug + - travis_wait make test-cmd-devfile-delete + - travis_wait make test-cmd-devfile-create diff --git a/.wwhrd.yml b/.wwhrd.yml index af5657326a4..4297d1cf442 100644 --- a/.wwhrd.yml +++ b/.wwhrd.yml @@ -37,4 +37,6 @@ exceptions: # BSD licence - wwhrd is not detecting it correctly - github.com/golang/protobuf/... # MIT licence - wwhrd is not detecting it correctly - - sigs.k8s.io/yaml/... \ No newline at end of file + - sigs.k8s.io/yaml/... + # Apache License 2.0 - wwhrd is not detecting it correctly + - knative.dev/pkg/test/... 
\ No newline at end of file diff --git a/Dockerfile.rhel b/Dockerfile.rhel index f84a1723a82..884c3e2cb46 100644 --- a/Dockerfile.rhel +++ b/Dockerfile.rhel @@ -1,7 +1,7 @@ # This Dockerfile builds an image containing the Linux, Mac and Windows version of odo # layered on top of the ubi7/ubi image. -FROM registry.svc.ci.openshift.org/openshift/release:golang-1.11 AS builder +FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 AS builder COPY . /go/src/github.com/openshift/odo WORKDIR /go/src/github.com/openshift/odo @@ -16,7 +16,7 @@ LABEL com.redhat.component=atomic-openshift-odo-cli-artifacts-container \ summary="This image contains the Linux, Mac and Windows version of odo" # Change version as needed. Note no "-" is allowed -LABEL version=1.2.1 +LABEL version=1.2.3 COPY --from=builder /go/src/github.com/openshift/odo/dist/bin/darwin-amd64/odo /usr/share/openshift/odo/mac/odo COPY --from=builder /go/src/github.com/openshift/odo/dist/bin/windows-amd64/odo.exe /usr/share/openshift/odo/windows/odo.exe diff --git a/Makefile b/Makefile index 3914e3da1cf..975dce9c1fa 100644 --- a/Makefile +++ b/Makefile @@ -238,11 +238,22 @@ test-cmd-url: test-cmd-devfile-url: ginkgo $(GINKGO_FLAGS) -focus="odo devfile url command tests" tests/integration/devfile/ +# Run odo debug devfile command tests +.PHONY: test-cmd-devfile-debug +test-cmd-devfile-debug: + ginkgo $(GINKGO_FLAGS) -focus="odo devfile debug command tests" tests/integration/devfile/ + ginkgo $(GINKGO_FLAGS_SERIAL) -focus="odo devfile debug command serial tests" tests/integration/devfile/debug + # Run odo push docker devfile command tests .PHONY: test-cmd-docker-devfile-push test-cmd-docker-devfile-push: ginkgo $(GINKGO_FLAGS) -focus="odo docker devfile push command tests" tests/integration/devfile/docker/ +# Run odo watch docker devfile command tests +.PHONY: test-cmd-docker-devfile-watch +test-cmd-docker-devfile-watch: + ginkgo $(GINKGO_FLAGS) -focus="odo docker devfile watch command tests" 
tests/integration/devfile/docker/ + # Run odo url docker devfile command tests .PHONY: test-cmd-docker-devfile-url test-cmd-docker-devfile-url: @@ -258,6 +269,11 @@ test-cmd-docker-devfile-delete: test-cmd-docker-devfile-catalog: ginkgo $(GINKGO_FLAGS) -focus="odo docker devfile catalog command tests" tests/integration/devfile/docker/ +# Run odo url docker devfile command tests +.PHONY: test-cmd-docker-devfile-url-pushtarget +test-cmd-docker-devfile-url-pushtarget: + ginkgo $(GINKGO_FLAGS) -focus="odo docker devfile url pushtarget command tests" tests/integration/devfile/docker/ + # Run odo watch command tests .PHONY: test-cmd-watch test-cmd-watch: @@ -334,4 +350,4 @@ openshiftci-presubmit-unittests: # Run OperatorHub tests .PHONY: test-operator-hub test-operator-hub: - ginkgo $(GINKGO_FLAGS) -focus="odo service command tests" tests/integration/operatorhub/ + ginkgo $(GINKGO_FLAGS_SERIAL) -focus="odo service command tests" tests/integration/operatorhub/ diff --git a/OWNERS b/OWNERS index b9381f7f925..044b31f2ce5 100644 --- a/OWNERS +++ b/OWNERS @@ -4,6 +4,8 @@ approvers: - kadel - girishramnani - cdrage +- dharmit +- mik-dass reviewers: - girishramnani diff --git a/README.adoc b/README.adoc index e3e91ae9cd0..41b9d47dec1 100644 --- a/README.adoc +++ b/README.adoc @@ -1,6 +1,3 @@ -+++ - -+++ [id="readme"] = `odo` - Developer-focused CLI for OpenShift and Kubernetes :toc: macro @@ -39,21 +36,6 @@ Existing tools such as `oc` are more operations-focused and require a deep-under |=== |Language |Container Image |Supported Package Manager |*Node.js* -|https://github.com/sclorg/s2i-nodejs-container[centos/nodejs-8-centos7] -|NPM - -| |https://access.redhat.com/articles/3376841[rhoar-nodejs/nodejs-8] -|NPM - -| -|https://www.github.com/bucharest-gold/centos7-s2i-nodejs[bucharestgold/centos7-s2i-nodejs] -|NPM - -| -|https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/nodejs-8-rhel7[rhscl/nodejs-8-rhel7] -|NPM - -| 
|https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/nodejs-10-rhel7[rhscl/nodejs-10-rhel7] |NPM diff --git a/build/VERSION b/build/VERSION index 99a4aef0c4d..cc904638af8 100644 --- a/build/VERSION +++ b/build/VERSION @@ -1 +1 @@ -v1.1.3 +v1.2.2 diff --git a/docs/dev/development.adoc b/docs/dev/development.adoc index 49f2ffeec87..3f7aa498784 100644 --- a/docs/dev/development.adoc +++ b/docs/dev/development.adoc @@ -9,10 +9,21 @@ toc::[] Requires *Go 1.12* -**WARNING**: If you are adding any features that require a higher version of golang, such as golang 1.13 -for example, please contact maintainers to check of the releasing systems can handle the newer versions. +Testing and release builds happen with the above version. Developers are advised to stick to this version if they can but it is not compulsory. -If that is ok, please ensure you update the required golang version, both here and in the file link:/scripts/rpm-prepare.sh[`scripts/rpm-prepare.sh`] + +[WARNING] +==== +If you are adding any features that require a higher version of golang, than the one mentioned above, please contact the maintainers in order to check if the releasing systems can handle the newer version. If that is ok, please ensure you update the required golang version, both here and in the files below, in your PR. + +.List of files to update for golang version + * link:/scripts/rpm-prepare.sh[`scripts/rpm-prepare.sh`] + * link:/.travis.yml[`.travis.yml`] + * link:/Dockerfile.rhel[`Dockerfile.rhel`] + * link:/openshift-ci/build-root/Dockerfile[`openshift-ci/build-root/Dockerfile`] +==== + +First setup your fork of the odo project, following the steps below . link:https://help.github.com/en/articles/fork-a-repo[Fork] the link:https://github.com/openshift/odo[`odo`] repository. 
@@ -178,15 +189,28 @@ NOTE: Refer https://github.com/golang/go/wiki/LearnTesting for Go best practices === Integration and e2e tests -*Prerequisites:* +*Prerequisites for OpenShift cluster:* -* A `minishift` or OpenShift environment with Service Catalog enabled: +* A `minishift` or OpenShift 3.11 environment with Service Catalog enabled: + ---- $ MINISHIFT_ENABLE_EXPERIMENTAL=y minishift start --extra-clusterup-flags "--enable=*,service-catalog,automation-service-broker,template-service-broker" ---- +OR +* A `crc` environment for 4.* local cluster: ++ +Follow link:https://github.com/code-ready/crc#documentation[`crc`] installation guide. ++ +OR +* A 4.* cluster hosted remotely -* `odo` and `oc` binaries in `$PATH`. +*Prerequisites for Kubernetes cluster:* + +* A `kubernetes` environment set up with single node cluster: ++ +For a single node `kubernetes` cluster install link:https://kubernetes.io/docs/tasks/tools/install-minikube/[`Minikube`] + +NOTE: Make sure that `odo` and `oc` binaries are in `$PATH`. Use the cloned odo directory to launch tests on 3.11 and 4.* clusters. To communicate with `Kubernetes` cluster use `kubectl`. No cluster configuration is needed to run tests on a 3.11 cluster spun up by `Minishift` or `oc cluster up` locally. However a 4.* cluster needs to be configured before launching the tests against it. The files `kubeadmin-password` and `kubeconfig` which contain cluster login details should be present in the `auth` directory and it should reside in the same directory as `Makefile`. If it is not present in the auth directory, please create it. Then run `make configure-installer-tests-cluster` to configure the 4.* cluster. *Integration tests:* @@ -331,9 +355,9 @@ There are some test environment variable that helps to get more control over the * UNIT_TEST_ARGS: Env variable UNIT_TEST_ARGS is used to get control over enabling test flags along with go test. 
For example, To enable verbosity export or set env UNIT_TEST_ARGS like `UNIT_TEST_ARGS=-v`. -*Running integration tests:* +*Running integration tests on Openshift:* -By default, tests are run against the `odo` binary placed in the PATH which is created by command `make`. Integration tests can be run in two (parallel and sequential) ways. To control the parallel run use environment variable `TEST_EXEC_NODES`. For example component test can be run +For running tests on a 3.11 cluster, login to the cluster using the required credentials. For example `odo login -u -p `. In case of 4.* cluster, `make configure-installer-tests-cluster` performs the login operation required to run the test. By default, the tests are run against the `odo` binary placed in the $PATH which is created by the command `make`. Integration tests can be run in two ways, parallel and sequential. To control the parallel run, use the environment variable `TEST_EXEC_NODES`. For example, the component test can be run as following: * To run the test in parallel, on a test cluster (By default the test will run in parallel on two ginkgo test node): @@ -353,7 +377,55 @@ Run component command integration tests $ TEST_EXEC_NODES=1 make test-cmd-cmp ---- -NOTE: To see the number of available integration test file for validation, press `tab` just after writing `make test-cmd-`. However there is a test file `generic_test.go` which handles certain test spec easily and can run the spec in parallel by calling `make test-generic`. By calling make `test-integration`, the whole suite can run all the spec in parallel on two ginkgo test node except `service` and `link` irrespective of service catalog status in the cluster. However `make test-integration-service-catalog` runs all spec of service and link tests successfully in parallel on cluster having service catalog enabled. `make test-odo-login-e2e` doesn't honour environment variable `TEST_EXEC_NODES`. 
So by default it runs login and logout command integration test suite on a single ginkgo test node sequentially to avoid race conditions in a parallel run. +NOTE: To see the number of available integration test file for validation, press `tab` just after writing `make test-cmd-`. However there is a test file `generic_test.go` which handles certain test specs easily and we can run it parallelly by calling `make test-generic`. By calling `make test-integration`, the whole suite will run all the specs in parallel on two ginkgo test node except `service` and `link` irrespective of service catalog status in the cluster. However `make test-integration-service-catalog` runs all specs of service and link tests in parallel on a cluster having service catalog enabled. `make test-odo-login-e2e` doesn't honour environment variable `TEST_EXEC_NODES`. So by default it runs login and logout command integration test suites on a single ginkgo test node sequentially to avoid race conditions during a parallel run. + +*Running integration tests on Kubernetes:* + +By default, the link:https://github.com/openshift/odo/tree/master/tests/integration/devfile[`integration tests`] for devfile feature, which is in experimental mode, run against `kubernetes` cluster. For more information on Experimental mode, please read link:https://github.com/openshift/odo/blob/master/docs/dev/experimental-mode.adoc:[`odo experimental mode`] document. + +The tests are run against the `odo` binary placed in the PATH which is created by the command `make`. Integration tests can be run in two ways (parallel and sequential). To control the parallel run use environment variable `TEST_EXEC_NODES`. 
For example, the devfile tests can be run + +* To run the tests on Kubernetes cluster: + ++ +Set the `KUBERNETES` environment variable ++ +---- +$ export KUBERNETES=true +---- + ++ +Enable the experimental mode ++ +---- +$ export ODO_EXPERIMENTAL=true +---- ++ +OR ++ +---- +$ odo preference set Experimental true -f +---- + +* To run the test in parallel, on a test cluster (By default the test will run in parallel on two ginkgo test node): + ++ +Run catalog command integration tests ++ +---- +$ make test-cmd-devfile-catalog +---- ++ + +* To run the catalog command integration tests sequentially or on single ginkgo test node: ++ +Run catalog command integration tests ++ +---- +$ TEST_EXEC_NODES=1 make test-cmd-devfile-catalog +---- + +NOTE: To see the number of available integration test files for validation, press `tab` keb just after writing `make test-cmd-devfile-`. By calling `make test-integration-devfile`, the suite will run all test specs in parallel on two ginkgo test nodes. *Running e2e tests:* @@ -449,7 +521,6 @@ To release a new version on GitHub: * Updates the version in the following files: ** link:/pkg/version/version.go[`pkg/version/version.go`] -** link:/scripts/installer.sh[`scripts/installer.sh`] ** link:/Dockerfile.rhel[`Dockerfile.rhel`] ** link:/scripts/rpm-prepare.sh[`scripts/rpm-prepare.sh`] diff --git a/docs/dev/experimental-mode.adoc b/docs/dev/experimental-mode.adoc index de30022eb61..8bb005ea2cf 100644 --- a/docs/dev/experimental-mode.adoc +++ b/docs/dev/experimental-mode.adoc @@ -105,7 +105,7 @@ Applying URL changes ✓ Changes successfully pushed to component ``` -By default, `odo push` executes `devbuild` and `devrun` devfile commands. However, custom commands can be also be provided to `odo push` via flags `--build-command` & `--run-command`. `odo push` also provides a `--devfile` flag to execute push with a `devfile.yaml` in a custom path. These flags are only available in the experimental mode. 
+By default, `odo push` executes `devbuild` and `devrun` devfile commands. However, custom commands can be also be provided to `odo push` via flags `--build-command` & `--run-command`. These flags are only available in the experimental mode. ##### Deleting a devfile component: Delete a devfile component with `odo delete`. This deletes all the Kubernetes resources created during `odo push`. Use the `-all` flag to delete the Kubernetes resources and the local config at `.odo/env/env.yaml` diff --git a/docs/img/openshift.png b/docs/img/openshift.png deleted file mode 100644 index 3b4316e39df..00000000000 Binary files a/docs/img/openshift.png and /dev/null differ diff --git a/docs/proposals/event-notification.md b/docs/proposals/event-notification.md new file mode 100644 index 00000000000..28f428a54aa --- /dev/null +++ b/docs/proposals/event-notification.md @@ -0,0 +1,211 @@ +# ODO build and application status notification + +## Summary + +With this proposal and [it's related issue](https://github.com/openshift/odo/issues/2550) we examine how to consume build/run event output from odo in order to allow external tools (such as IDEs) to determine the application/build status of an odo-managed application. + +In short, the proposal is: +- New flag for push command, `odo push -o json`: Outputs JSON events that correspond to what devfile actions/commands are being executed by push. +- New odo command `odo component status -o json`: Calls `supervisord ctl status` to check the running container processes, checks the container/pod status every X seconds (and/or a watch, for Kubernetes case), and sends an HTTP/S request to the application URL. The result of those is output as JSON to be used by external tools to determine if the application is running. 
+- A standardized markup format for communicating detailed build status, something like: `#devfile-status# {"buildStatus":"Compiling application"}`, to be optionally included in devfile scripts that wish to provide detailed build status to consuming tools. + +## Terminology + +With this issue, we are looking at adding odo support for allowing external tools to gather build status and application status. We further divide both statuses into detailed and non-detailed, with detailed principally being related to statuses that can be determine by looking at container logs. + +**Build status**: A simple build status indicating whether the build is running (y/n), and whether the last build succeeded or failed. This can be determined based on whether odo is running a dev file action/command, and the process error code (0 = success, non-0 = failed) of that action/command. + +**Detailed build status**: An indication of which step in the build process the build is in. For example: are we 'Compiling application', or are we 'Running unit tests'? + +**App status**: Determine if an application is running, using container status (for both local and Kubernetes, various status: containercreating, started, restarting, etc), `supervisord ctl status`, or whether an HTTP/S request to the root application URL returns an HTTP response with any status code. + +**Detailed application status**: +- While app status works well for standalone application frameworks (Go, Node Express, Spring Boot), it works less well for full server runtimes such as Java EE application servers like OpenLiberty/WildFly that may begin responding to Web requests before the user's deployed WAR/EAR application has finished startup. +- Since these application servers are built to serve multiple concurrently-deployed applications, it is more difficult to determine the status of any specific application running on them. 
The lifecycle of the application server differs from the lifecycle of the application running inside the application server. +- Fortunately, in these cases the IDE/consuming tool can use the console logs (from `odo log`) from the runtime container to determine a more detailed application status. +- For example, OpenLiberty (as an example of an application-server-style container) prints a specific code when an application is starting `CWWKZ0018I: Starting application {0}.`, and another when it has started. `CWWKZ0001I: Application {0} started in {1} seconds.` +- Odo itself should NOT know anything about these specific application codes; knowing how these translate into a detailed application status would be the responsibility of the IDE/consuming tool. Odo's role here is only to provide the console output log. +- In the future, we could add these codes into the devfile to give Odo itself some agency over determining the detailed application status, but for this proposal responsibility is left with the consuming tool. + +**Devfile writer**: A devfile writer may be a runtime developer (for example, a Red-Hatter working on WildFly or an IBMer working on OpenLibery) creating a devfile for their organization's runtime (for example 'OpenLiberty w/ Maven' dev file), or an application developer creating/customizing a dev file for use with their own application. In either case, the devfile writer must be familiar with the semantics of both odo and the devfile. + +## JSON-based odo command behaviours to detect app and build status + +New odo commands and flags: +- `odo push -o json` +- `odo component status -o json` + +With these two additions, an IDE or similar external tool can detect build running/succeeded/failed, application starting/started/stopping/stopped, and (in many cases) get a 'detailed app status' and/or 'detailed build status'. 
+ +### Build status notification via `odo push -o json` + +`odo push -o json` is like standard `odo push`, but instead it outputs JSON events (including action console output) instead of text. This allows the internal state of the odo push process to be more easily consumed by external tools. + +Several different types of JSON-formatted events would be output, which correspond to odo container command/action executions: +- Dev file command execution begun *(command name, start timestamp)* +- Dev file command execution completed *(error code, end timestamp)* +- Dev file action execution begun *(action name, parent command name, start timestamp)* +- Dev file action execution completed *(action name, error code, end timestamp)* +- Log/console stdout from the actions, one line at a time *(for example, `mvn build` output).* (timestamp) + +(Exact details for which fields are included with events are TBD) + +In addition, `odo push -o json` should return a non-zero error code if one of the actions returned a non-zero error code, otherwise zero is returned. + +### `odo push -o json` example output + +This is what an `odo push -o json` command invocation would look like: +``` +odo push -o json +{ "devFileCommandExecutionBegun": { "commandName" : "build", "timestamp" : "(UTC unix epoch seconds.microseconds)" } } +{ "devFileActionExecutionBegun" : { "commandName" : "build", "actionName" : "run-build-script", "timestamp" : "(...)" } } +{ "logText" : { "text:" "one line of text received\\n another line of text received", "timestamp" : "(...)" } } +{ "devFileActionExecutionComplete" : { "errorCode" : 1, ( same as above )} } +{ "logText" : { "text": (... ), "timestamp" : "(...)" } } # Log text is interleaved with events +{ "devFileCommandExecutionComplete": { "success" : false, (same as above) } } + +(Exact details on event name, and JSON format are TBD; feedback welcome!) 
+``` + +These events allow an external odo-consuming tool to determine the build status of an application (build succeeded, build failed, build not running). + +Note that unlike other machine-readable outputs used in odo, each individual line is a fully complete and parseable JSON document, allowing events to be streamed and processed by the consuming tool one-at-a-time, rather than waiting for all the events to be received before being parseable (which would be required if the entire console output was one single JSON document, as is the case for other odo machine-readable outputs.) + +### Detailed build status via JSON+custom markup + +For detailed build status, it is proposed that devfile writers may *optionally* include custom markup in their devfile actions which indicate a detailed build status: +- If a dev file writer wanted to communicate that the current command/action were compiling the application, they would insert a specific markup string (`#devfile-status#`) at the beginning of a console-outputted line, and then between those two fields would be a JSON object with a single field `buildStatus`: + - For example: `#devfile-status# {"buildStatus":"Compiling application"}` would then communicate that the detailed build status should be set to `Compiling application`. +- Since this line would be output as container stdout, it would be included as a `logText` JSON event, and the consuming tool can look for this markup string and parse the simple JSON to extract the detailed build status. +- Feedback welcome around exact markup text format. + +The build step (running as a bash script, for example, invoked via an action) of a devfile might then look like this: +``` +#!/bin/bash +(...) 
+echo "#devfile-status# {'buildStatus':'Compiling application'} +mvn compile +echo "#devfile-status# {'buildStatus':'Running unit tests'} +mvn test +``` + +This 'detailed build status' markup text is *entirely optional*: if this markup is not present, the odo tool can still determine build succeeded/failed and build running/not-running using the other `odo push -o json` JSON events. + +### App status notification via `odo component status -o json` and `odo log --follow` + +In general, within the execution context that odo operates, there are a few ways for us to determine the application status: +1) Will the application respond to an HTTP/S request sent to its exposed URL? +2) What state is the container in? (running/container creating/restarting/etc -- different statuses between local and Kube but same general idea) +3) Are the container processes running that are managed by supervisord? We check this by calling `supervisord ctl status`. +4) In the application log, specific hardcoded text strings can be searched for (for example, OpenLiberty outputs defined status codes to its log to indicate that an app started.) But, note that we definitely don't want to hardcode specific text strings into ODO: instead, this proposal leaves it up to the IDE to process the output from the `odo log` command. Since the `odo log` command output would contain the application text, IDEs can provide their own mechanism to determine status for supported devfiles (and in the future we may wish to add new devfile elements for these strings, to allow odo to do this as well). + +Ideally, we would like for odo to provide consuming tools with all 4 sets of data. Thus, as proposed: +- 1, 2 and 3 are handled by a new `odo component status -o json` command, described here. +- 4 is handled by the existing unmodified `odo log --follow` command. 
+ +The new proposed `odo component status -o json` command will: +- Be a *long-running* command that will continue outputing status until it is aborted by the parent process. +- Every X seconds, send an HTTP/S request to the URLs/routes of the application as they existed when the command was first executed. Output the result as a JSON string. +- Every X seconds (or using a Kubernetes watch, where appropriate), check the container status for the application, based on the application data that was present when the command was first issued. Output the result as a JSON string. +- Every X seconds call `supervisord ctl status` within the container and report the status of supervisord's managed processes. + +**Note**: This command will NOT, by design, respond to route/application changes that occur during or after it is first invoked. It is up to consuming tools to ensure that the `odo component status` command is stopped/restarted as needed. + - For example, if the user tells the IDE to delete their application with the IDE UI, the IDE will call `odo delete (component)`; at the same time, the IDE should also abort any existing `odo component status` commands that are running (as these are no longer guaranteed to return a valid status now that the application itself no longer exists). `odo component status` will not automatically abort when the application is deleted (because it has no reliable way to detect this in all cases). + - Another example: if the IDE adds a new URL via `odo url create [...]`, any existing `odo component status` commands that are running should be aborted, as these commands would still only be checking the URLs that existed when the command was first invoked (eg there is intentionally no cross-process notification mechanism for created/updated/deleted URLs implemented as part of this command.) 
+ - See discussion of this design descision in 'Other solutions considered' below + +This is an example an `odo component status -o json` command invocation look like: +``` +odo component status -o json + +{ "componentURLStatus" : { "url" : "https://(...)", "response" : "true", "responseCode" : 200, "timestamp" : (UTC unix epoch seconds.microseconds) } } +{ "componentURLStatus" : { "url" : "https://(...)", "response" : "false", error: "host unreachable", "timestamp" : (...) } } +{ "containerStatus" : { "status" : "containercreating", "timestamp" : (...)} } +{ "containerStatus" : { "status" : "running", "timestamp" : (...)} } +{ "supervisordCtlStatus" : { "name": "devrun", "status" : "STARTED", "timestamp" : (...)} } +(...) + +(Exact details on event name, and JSON format are TBD; feedback welcome!) +``` + +To keep from overwhelming the output, only state changes would be printed (after an initial state output), rather than every specific event. + +## Consumption of odo via external tools, such as IDEs + +Based on our existing knowledge from previously building similar application/build-status tracking systems in Eclipse Codewind, we believe the above described commands should allow any external tool to provide a detailed status for odo-managed applications. + +The proposed changes ensure that the the high-level logic around tracking application changes across time can be managed by external tools (such as IDEs) as desired, without the need to leak/"pollute" odo with any of these details. These changes give consuming tools all the data they need ensure fast, reliable, up-to-date and (where possible) detailed build/application status. + + +### What happens if the network connection is lost while executing these commands? + +One potential challenge is how to handle network connection instability when the push/log/status commands are actively running. 
Both odo, and any external consuming tools, should be able to ensure that the odo-managed application can be returned to a stable state once the connection is re-established. + +We can look at how each command should handle a temporary network disconnection: +- If network connection is dropped during *push*: consuming tool can restart the push command from scratch. Well-written dev files should be nuking any existing build processes (for example, when running a 'build' action, that build action should look for any old maven processes and kill them, if there are any that are already running; or said another way, it is up to the build action of a devfile to ensure that container state is consistent before starting a new build) +- If connection is dropped during *logs*: start a new tail, and then do a full 'get logs' to make sure we didn't miss anything; match up the two (the full log and the tail) as best as possible, to prevent duplicates. (The Kubernetes API may already have a better way of handling this; this is the "naive" algorithm) +- If connection is dropped during *status*: no special behaviour is needed here. + +## Other solutions considered + +Fundamentally, this proposal needs to find a solution to this scenario: + +1) IDE creates a URL (calls `odo url create`) and pushes the user's code (calls `odo push`) +2) To get the status of that app, the IDE runs `odo component status -o json` to start the long-running odo process. The status command then helpfully reports the pod/url/supervisord container status, which allows the IDE to determine when the app process is up. +3) *[some time passes]* +4) IDE creates a new URL (or performs some other action that invalidates the existing `odo status` state, such as `odo component delete`) by calling `odo url create`. +5) The long-running `odo status` process is still running, but somehow needs to know about the new URL from step 4 (or other events). 
+ +Thus, in some way, that existing long-running `odo status` process needs to be informed of the new event (a new url event, a component deletion event, etc). Since these events are generated across independent OS processes, this requires some form of [IPC](https://en.wikipedia.org/wiki/Inter-process_communication). + +### Some options in how to communicate these data across independent odo processes (in ascending order of complexity) + +#### 1) Get the IDE/consuming tool to manage the lifecycle of `odo component status` + +This is the solution proposed in this document, and is included here for contrast. + +Since the IDE has a lifecycle that is greater than each of the individual calls to `odo`, and the IDE is directly and solely responsible for calling odo (when the user is interacting with the IDE), it is a good fit to ensure the state of `odo component status` is up-to-date and consistent. + +But this option is by no means a perfect solution: +- This does introduce complexity on the IDE side, as the IDE needs to keep track of which `odo` processes are running for each component, and it needs to know when/how to respond to actions (delete/url create/etc). But since the IDE is a monolithic process, this is at least straightforward (I mocked up the algorithm that the IDE will use in each case, which I can share if useful.) +- This introduces complexity for EVERY new IDE/consuming tool that uses this mechanism; rather than solving it once in ODO, it needs to be solved X times for X IDEs.
+- Requires multiple concurrent long-running odo processes per odo-managed component + +#### 2) `odo component status` could monitor files under the `.odo` directory in order to detect changes; for example, if a new URL is added to `.odo/env/env.yaml`, `odo component status` would detect that and update the URLs it is checking + +This sounds simple, but is surprisingly difficult: +- No way to detect a delete operation just by watching `.odo` directory: at present, `odo delete` does not delete/change any of the files under `.odo` +- Partial file writes/atomicity/file locking: How to ensure that when `odo component status` reads a file that it has been fully written by the producing process? One way is to use file locks, but that means using/testing each supported platform's file locking mechanisms. Then need to implement a cross-process polling mechanism. +- Or, need to implement a cross-platform [filewatching mechanism](https://github.com/fsnotify/fsnotify): We need a way to watch the `.odo` directory and respond to I/O events on the files, such as modifications. +- Windows: Unlike other supported platforms, Windows has a number of quirky file-system behaviours that need to be individually handled. The most relevant one here is that Windows will not let you delete/modify a file in one process if another process is holding it open (we have been bitten by this a number of times in Codewind) +- Need to support all filesystems: some filesystems have different file write/locking/atomicity guarantees for various operations. + + +#### 3) Convert odo into a multi-process client-server architecture + +Fundamentally this problem is about how to share state between odo processes; if odo instead used a client-server architecture, odo state could be centrally/consistently managed via a single server process, and communicated piecemeal to odo clients. + +As one example of this, we could create a new odo process/command (`odo status --daemon`?)
that would listen on some IPC mechanism (TCP/IP sockets/named pipes/etc) for events from individual odo commands: +1) IDE runs `odo status --daemon --port=32272` as a long-running process; the daemon listens on localhost:32272 for events. The daemon will output component/build status to stdout, which the IDE can provide back to the user. +2) IDE calls `odo url create` to create a new URL, but includes the daemon information in the request: `odo url create --port=32272 (...)` +3) The `odo url create` process updates the `env.yaml` to include the URL, then connects to the daemon on `localhost:32272` and informs the daemon of the new url. +4) The daemon receives the new URL event, and reconciles it with its existing state for the application, and begins watching the new URL. +(This would need to be implemented for every odo change event) + + +Drawbacks: +- Odo's code currently assumes that commands are short-lived, mostly single-threaded, and compartmentalized; switching to a server would fundamentally alter this presumption of existing code +- Much more complex to implement versus other options: requires changing the architecture of the odo tool into a multithreaded client-server model, meaning many more moving parts, and [the perils of distributed computing](https://en.wikipedia.org/wiki/Fallacies_of_distributed_computing). +- Must be cross-platform; IPC mechanisms/behaviour are VERY platform-specific, so we probably need to use TCP/IP sockets. +- But, if using HTTP/S over TCP/IP socket, we need to secure endpoints; just listening on localhost [is not necessarily enough to ensure local-only access](https://bugs.chromium.org/p/project-zero/issues/detail?id=1524). +- Plus some corporate developer environments may use strict firewall rules that prevent server sockets, even on localhost ports.
+ +Variants on this idea: 1) a new odo daemon/LSP-style server process that was responsible for running ALL odo commands; calls to the `odo` CLI would just initiate a request to the server, and the server would be responsible for performing the action and monitoring the status + +### Proposed option vs options 2/3 + +Hopefully the inherent complexity of options 2-3 is fully conveyed above, but if you all have another fourth option, let me know. + +Ultimately, this proposal (option 1) cleanly solves the problem, puts the complexity in the right place (the IDE), is straight-forward to implement, is not time consuming to implement, and does not fundamentally alter the odo architecture. + +And this option definitely does not in any way tie our hands in implementing a more complex solution in the future if/when we our requirements demand it. diff --git a/docs/proposals/odo-deploy.md b/docs/proposals/odo-deploy.md new file mode 100644 index 00000000000..04841cdf255 --- /dev/null +++ b/docs/proposals/odo-deploy.md @@ -0,0 +1,148 @@ +# Support build and deployment of application container image using odo + +## Abstract +Add a new command (verb) to build a production-like/slim container image for the project and deploy the built image on the target platform. + +There is an existing proposal for adding on outer-loop information (including support for multiple strategies for building and deploying the projects) to Devfile 2.1.0: +https://github.com/devfile/kubernetes-api/issues/49 + +It would be useful to start the design/development of a simpler version of `odo deploy` with devfile 2.0.0 that covers: +- single build strategy - Dockerfile built using `BuildConfig` or `Kaniko`. +- single deployment strategy - Kubernetes manifest deployed using `kubectl`. + +## Motivation +`odo` is limited to development inner-loop for a project and there is no support for outer-loop - build a slim/production container image for your application and deploy the build container. 
It would be very useful for developers to be able to try inner-loop and then transition over to the outer-loop using odo. The outer-loop information could be provided by the application stack (devfile) to avoid developers having to worry about these aspects. + +`odo deploy` can be a good way to assure the developer that their application can be built and deployed successfully using the build/deploy guidance that comes from devfile. + +It is not meant to replace GitOps/pipeline-based deployments for governed environments (test, stage, production). However, both `odo deploy` and `odo pipelines` must honour the build and deployment information provided by the application stack (devfile). + +## User flow + +This command will allow a user to test the transition from inner-loop to outer-loop, so the application is truly ready for pipelines to take over. Here's how a typical application development flow might look: + +User flow: +1. `odo create ` - This initializes odo component in the current directory. +1. Edit the project source code to develop the application. +1. `odo url create` - This stores URL information (host, port etc.) for accessing the application on the cluster (if not done already). +1. `odo push` - This runs the application source code using inner-loop instructions from devfile. +1. Validate the running application is working as intended. +1. Iterate over steps 2 and beyond (as needed). +1. `odo deploy` - This would build a new clean image and deploy it to the target cluster using outer-loop instructions from devfile and user-provided arguments. +1. Validate the deployed application is working as intended. +1. Iterate over steps 2 and beyond (as needed). +1. Optionally, run `odo deploy delete` to clean up resources created with `odo deploy`. +1. Push your code to Git - ready for sharing with your team and for CI/CD pipelines to take over.
+ +## User stories + +### Initial build and deploy support to odo - https://github.com/openshift/odo/issues/3300 + +## Design overview +`odo deploy` could provide developers with a way to build a container image for their application and deploy it on a target Kubernetes deployment using the build/deploy guidance provided by the devfile. + +This deployment is equivalent to a development version of your production and will be using the namespace and URL information from the inner-loop. This will ensure that it is not seen as a way to deploy real workloads in production. + +`odo deploy delete` could provide developers with a way to clean up any existing deployment of the application. + +### High-level design: + +#### Pre-requisites: +- The implementation would be under the experimental flag. +- Only supported for Devfile v2.0.0 components. +- Only supported for Kubernetes/OpenShift targets. + +#### odo deploy +This command will build a container image for the application and deploy it on the target Kubernetes environment. + +Flags: + - `--tag`: The tag to be used for the built application container image - `//:` (optional) + - Openshift: the tag can be generated based on project details and internal image registry can be used + - Kubernetes: the user must provide the fully qualified registry and tag details. + - `--credentials`: The credentials needed to push the image to the container image registry (optional) + - Openshift: default builder account for BuildConfig is used + - Kubernetes: the user must provide the credentials + - `--force`: Delete the existing deployment (if present) and re-create a new deployment. + +#### odo deploy delete +This command will delete any resources created by odo deploy. + +### Detailed design: + +### Devfile + +For the initial implementation, we could use devfile v2.0.0 and capture basic outer-loop information as `metadata` on the devfile. `odo` could look for specific keys, while other tools like Che could ignore them. 
+ +For example: +``` +metadata: + alpha.build-dockerfile: + alpha.deployment-manifest: +``` + +### Dockerfile +This could be any valid dockerfile. + +### Deployment manifest +The deployment manifest could be templated to help with replacing key bits of information: +- COMPONENT_NAME +- CONTAINER_IMAGE +- PORT + +Examples: +- Standard Kubernetes resources (deployment, service, route): https://github.com/groeges/devfile-registry/blob/master/devfiles/nodejs/deploy_deployment.yaml + +- Operator based resources e.g. Runtime component operator (https://operatorhub.io/operator/): https://github.com/groeges/devfile-registry/blob/master/devfiles/nodejs/deploy_runtimecomponent.yaml + +- Knative service: https://github.com/groeges/devfile-registry/blob/master/devfiles/nodejs/deploy_knative.yaml + +### odo deploy +This command will perform the following actions: + +#### Input validation +- Check if the devfile is v2.0.0 and that it specifies the expected outer-loop metadata. + - If not provided, display a meaningful error message. +- Validate all arguments passed by the user. + - If argument values are invalid, display a meaningful error message. + +#### Build +- If the cluster supports `BuildConfig`, use it: + - Use BuildConfig to build and push a container image by using: + - the source code in the user's workspace + - dockerfile specified by the devfile + - tag generated based on component and internal registry details +- If the cluster does not support `BuildConfig`, switch to Kaniko: + - Use Kaniko to build and push a container image by using: + - the source code in the user's workspace + - dockerfile specified by the devfile + - tag provided by the user + - credentials provided by the user +- Cleanup all build resources created above. + +#### Deploy +- Delete existing deployment, (if invoked with --force flag) +- Fetch the deployment manifest using URI in the metadata of the devfile. 
+- Replace templated text in the deployment manifest with relevant values: + - COMPONENT_NAME: name of odo component/microservice + - CONTAINER_IMAGE: `tag` for the built image + - PORT: URL information in env.yaml +- Apply the new deployment manifest create/update the application deployment. +- Save the deployment manifest in `.odo/env/` folder. +- Provide the user with a URL for accessing the deployed application. + +### odo deploy delete +This command will perform the following actions: +- Check if there is an existing deployment for the app (can use the saved deployment manifest in `.odo/env/` folder) + - If found, delete the resources specified in the deployment manifest. + - If not found, show a meaningful error message to the user. + +## Future evolution + +- Devfile 2.1.0 should broaden the scope for the outer-loop support in devfiles. For example: + - Support multiple build strategies - buildah, s2i, build v3 etc. + - Support multiple deployment strategies - k8s manifest, native service, pod spec etc. + - Any referenced assets should be immutable to ensure reproducible builds/deployments. + +- If a devfile does not provide deployment manifest, odo can perhaps create a manifest in the way it does for inner-loop. This will mean devfile creators do not need to provide a deployment manifest if they do not care so much about the deployment aspect. + +- Once `odo link` and service binding is supported by odo and devfiles v2, we could use the same service binding information for `odo deploy`. 
\ No newline at end of file diff --git a/docs/proposals/secure-registry.md b/docs/proposals/secure-registry.md new file mode 100644 index 00000000000..1bd578a3f26 --- /dev/null +++ b/docs/proposals/secure-registry.md @@ -0,0 +1,77 @@ +# Secure Registry Support + +Table of contents +- [Problem Statement](#problem-statement) +- [Terminology](#terminology) +- [Proposed Design](#proposed-design) +- [Related Issues](#related-issues) + +## Problem Statement +Currently odo only supports registry that is hosted by the platform that has publicly signed certificate, we should support secure registry so that user is able to store the confidential devfile to the registry and let the platform with certificate in user's trust store host the registry, authentication is needed on user side to access the platform. + +## Terminology +Registry: registry is the place that stores index file (index.json) and devfile (devfile.yaml) so that user can catalog and create devfile component from the registry. The registry itself can be hosted on GitHub (GitHub-hosted registry) or Cluster (Cluster-hosted registry) + +Authentication method (Credential): +- Username/Password: usually user has full access to the resource with Username/Password authentication method. +- Personal Access Token (PAT): this is the recommended authentication method as user can grant limited resource access to the personal access token to make the resource access more secure. + +## Proposed Design +Support Scenarios: + +In order to make secure registry support feature more clear and specific, we should support the following scenarios: +1. GitHub-hosted registry + 1. GitHub public: + - Clients authenticate with GitHub personal access token + - TLS achieved with Git public CA signed certification in client trust store + 2. GitHub Enterprise: + - Clients authenticate with GHE personal access token + - TLS achieved with GHE public CA signed certification in client trust store +2. 
Cluster-hosted registry + - Clients authenticate with service account token + - TLS achieved with ingress gateway CA signed certification in client trust store + +Context: +1. Given GitHub is going to depreciate basic authentication with username/password https://developer.github.com/changes/2020-02-14-deprecating-password-auth/, we have to only support personal access token authentication method for GitHub-hosted registry scenario. +2. For cluster-hosted registry, the registry architecture would be creating a NGINX server to host the secure registry, then create a ingress gateway for the NGNIX server to let client access. + +Work flow to access secure registry: +1. Collect Credential + + Currently we have `odo registry add ` and `odo registry update ` to add and update registry accordingly. Regarding the CLI design, we can implement the following CLI design to support collecting credential: + - `odo registry add --token ` + - `odo registry add --user --password ` + - `odo registry update --token ` + - `odo registry update --user --password ` + +2. Store Credential + + We can use third-party package keyring(https://github.com/zalando/go-keyring) to help store user's credential, this package is platform agnostic, which means it can automatically use the existing keyring instance on the platform, for example: + - Mac: Implementation depends on the /usr/bin/security binary for interfacing with the OS X keychain. + - Linux: Implementation depends on the Secret Service dbus interface, which is provided by GNOME Keyring. + - Windows: Windows Credential Manager support by using the lib https://github.com/danieljoos/wincred. + +3. Use Credential + + We can still use the built-in package `net/http` with adding token to the request header, sample code: + ``` + token := "123abc" + bearer := "Bearer " + token + req, err := http.NewRequest("GET", url, nil) + if err != nil { + log.Println(err) + } + req.Header.Add("Authorization", bearer) + ``` + +4. 
Delete Credential + + If multiple secure registries share the same credential, `odo registry delete ` will delete the credential from keyring instance once the last secure registry using that credential has been deleted. + +Create devfile component from secure registry: + +When downloading devfile from secure registry, we validate if the credential is valid by adding token to the request header and checking the response. + +## Related issues +- Dynamic registry support: https://github.com/openshift/odo/pull/2940 +- Performance improvement for `odo catalog list components`: https://github.com/openshift/odo/pull/3112 \ No newline at end of file diff --git a/docs/public/community.adoc b/docs/public/community.adoc index 458ba4f55c1..725c1aad684 100644 --- a/docs/public/community.adoc +++ b/docs/public/community.adoc @@ -1,6 +1,6 @@ == Events -=== Upcoming events +=== Past events * https://www.redhat.com/en/summit[April 28-29 2020 - Red Hat Summit] * https://www.ibm.com/events/think/[May 5-6 2020 - IBM Think Digital] diff --git a/docs/public/debugging-using-devfile.adoc b/docs/public/debugging-using-devfile.adoc new file mode 100644 index 00000000000..3d74c665e04 --- /dev/null +++ b/docs/public/debugging-using-devfile.adoc @@ -0,0 +1,62 @@ +# Debugging components using devfiles + +`odo` uses devfiles to build and deploy components. More information on devifles : https://redhat-developer.github.io/devfile/[Introduction to devfile] + +To enable debugging mode for the component using devfiles, we need to enable the experimental mode for odo. This can be done by: `odo preference set experimental true`. We also need a devfile with `debugrun` step. 
Example of a nodejs devfile with a debugrun step: + +```yaml +apiVersion: 1.0.0 +metadata: + name: test-devfile +projects: + - + name: nodejs-web-app + source: + type: git + location: "https://github.com/che-samples/web-nodejs-sample.git" +components: + - type: dockerimage + image: quay.io/eclipse/che-nodejs10-ubi:nightly + endpoints: + - name: "3000/tcp" + port: 3000 + alias: runtime + env: + - name: FOO + value: "bar" + memoryLimit: 1024Mi + mountSources: true +commands: + - name: devbuild + actions: + - type: exec + component: runtime + command: "npm install" + workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + - name: devrun + actions: + - type: exec + component: runtime + command: "nodemon app.js" + workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + - name: debugrun + actions: + - type: exec + component: runtime + command: "nodemon --inspect=${DEBUG_PORT}" + workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/ +``` + +- Now we need to create the component using `odo create nodejs` +- Next we enable remote debugging for the component using `odo push --debug`. We can also use a custom step as the debugrun step using `odo push --debug --debug-command="custom-step"` +- Next we port forward a local port for debugging using `odo debug port-forward`. The default local port used for debugging is 5858. If 5858 is occupied, odo will automatically pick up a local port. We can also specify the local port using, `odo debug port-forward --local-port 5858` +- Next we need to attach the debugger to the local port. 
Here's a guide to do it for VS Code : https://code.visualstudio.com/docs/nodejs/nodejs-debugging#_remote-debugging[Remote Debugging] + +### Check if a debugging session is running + +We can check if a debugging session is running for a component by using `odo debug info` + +``` +odo debug info +Debug is running for the component on the local port : 5858 +``` \ No newline at end of file diff --git a/docs/public/deploying-a-devfile-using-odo.adoc b/docs/public/deploying-a-devfile-using-odo.adoc index 3c9f0c693f0..ea8ed4de514 100644 --- a/docs/public/deploying-a-devfile-using-odo.adoc +++ b/docs/public/deploying-a-devfile-using-odo.adoc @@ -93,12 +93,13 @@ In our example, we will be using `java-spring-boot` to deploy a sample https://s In this example we will be deploying an https://github.com/odo-devfiles/springboot-ex[example Spring Boot® component] that uses https://maven.apache.org/install.html[Maven] and Java 8 JDK. -. Download the example Spring Boot® component +. Download the example Spring Boot® component. + [source,sh] ---- $ git clone https://github.com/odo-devfiles/springboot-ex ---- +Alternatively, you can pass in `--downloadSource` to `odo create` to have odo download a sample project. . Change the current directory to the component directory: + @@ -107,14 +108,6 @@ In this example we will be deploying an https://github.com/odo-devfiles/springbo $ cd ---- -. List the contents of the directory to confirm that the front end is indeed a Java application: -+ -[source,sh] ----- - $ ls - chart Dockerfile Dockerfile-build Dockerfile-tools Jenkinsfile pom.xml README.md src ----- - . 
Create a component configuration using the `java-spring-boot` component-type named `myspring`: + [source,sh] @@ -123,10 +116,19 @@ In this example we will be deploying an https://github.com/odo-devfiles/springbo Experimental mode is enabled, use at your own risk Validation - ✓ Checking devfile compatibility [71105ns] - ✓ Validating devfile component [153481ns] + ✓ Checking devfile compatibility [195728ns] + ✓ Creating a devfile component from registry: DefaultDevfileRegistry [170275ns] + ✓ Validating devfile component [281940ns] + + Please use odo push command to create the component with source deployed +---- - Please use odo push command to create the component with source deployed +. List the contents of the directory to see the devfile and sample Java application source code: ++ +[source,sh] +---- + $ ls + README.md devfile.yaml pom.xml src ---- . Create a URL in order to access the deployed component: @@ -145,18 +147,27 @@ NOTE: You must use your cluster host domain name when creating your URL. + [source,sh] ---- - $ odo push - • Push devfile component myspring ... 
- ✓ Waiting for component to start [30s] + $ odo push + + Validation + ✓ Validating the devfile [81808ns] + + Creating Kubernetes resources for component myspring + ✓ Waiting for component to start [5s] + + Applying URL changes + ✓ URL myspring-8080: http://myspring-8080.apps-crc.testing created + + Syncing to component myspring + ✓ Checking files for pushing [2ms] + ✓ Syncing files to the component [1s] + + Executing devfile commands for component myspring + ✓ Executing devbuild command "/artifacts/bin/build-container-full.sh" [1m] + ✓ Executing devrun command "/artifacts/bin/start-server.sh" [2s] - Applying URL changes - ✓ URL myspring-8080: http://myspring-8080.apps-crc.testing created - ✓ Checking files for pushing [752719ns] - ✓ Syncing files to the component [887ms] - ✓ Executing devbuild command "/artifacts/bin/build-container-full.sh" [23s] - ✓ Executing devrun command "/artifacts/bin/start-server.sh" [2s] - ✓ Push devfile component myspring [57s] - ✓ Changes successfully pushed to component + Pushing devfile component myspring + ✓ Changes successfully pushed to component ---- . List the URLs of the component: @@ -176,6 +187,16 @@ NOTE: You must use your cluster host domain name when creating your URL. $ curl http://myspring-8080.apps-crc.testing ---- +. To delete your deployed application: ++ +[source,sh] +---- + $ odo delete + ? Are you sure you want to delete the devfile component: myspring? Yes + ✓ Deleting devfile component myspring [152ms] + ✓ Successfully deleted component +---- + == Deploying a Node.js® component to an OpenShift cluster In this example we will be deploying an https://github.com/odo-devfiles/nodejs-ex[example Node.js® component] that uses https://www.npmjs.com/[NPM]. 
@@ -210,8 +231,9 @@ In this example we will be deploying an https://github.com/odo-devfiles/nodejs-e Experimental mode is enabled, use at your own risk Validation - ✓ Checking devfile compatibility [106956ns] - ✓ Validating devfile component [250318ns] + ✓ Checking devfile compatibility [111738ns] + ✓ Creating a devfile component from registry: DefaultDevfileRegistry [89567ns] + ✓ Validating devfile component [186982ns] Please use odo push command to create the component with source deployed ---- @@ -232,18 +254,27 @@ NOTE: You must use your cluster host domain name when creating your URL. + [source,sh] ---- - $ odo push - • Push devfile component mynodejs ... - ✓ Waiting for component to start [27s] + $ odo push + + Validation + ✓ Validating the devfile [89380ns] - Applying URL changes - ✓ URL mynodejs-3000: http://mynodejs-3000.apps-crc.testing created - ✓ Checking files for pushing [1ms] - ✓ Syncing files to the component [839ms] - ✓ Executing devbuild command "npm install" [3s] - ✓ Executing devrun command "nodemon app.js" [2s] - ✓ Push devfile component mynodejs [33s] - ✓ Changes successfully pushed to component + Creating Kubernetes resources for component mynodejs + ✓ Waiting for component to start [3s] + + Applying URL changes + ✓ URL mynodejs-3000: http://mynodejs-3000.apps-crc.testing created + + Syncing to component mynodejs + ✓ Checking files for pushing [2ms] + ✓ Syncing files to the component [1s] + + Executing devfile commands for component mynodejs + ✓ Executing devbuild command "npm install" [3s] + ✓ Executing devrun command "nodemon app.js" [2s] + + Pushing devfile component mynodejs + ✓ Changes successfully pushed to component ---- . List the URLs of the component: @@ -263,6 +294,16 @@ NOTE: You must use your cluster host domain name when creating your URL. $ curl http://mynodejs-8080.apps-crc.testing ---- +. To delete your deployed application: ++ +[source,sh] +---- + $ odo delete + ? 
Are you sure you want to delete the devfile component: mynodejs? Yes + ✓ Deleting devfile component mynodejs [139ms] + ✓ Successfully deleted component +---- + == Deploying a Java Spring Boot® component locally to Docker In this example, we will be deploying the same Java Spring Boot® component we did earlier, but to a locally running Docker instance. @@ -278,18 +319,34 @@ In this example, we will be deploying the same Java Spring Boot® component we d ---- + +. Download the example Spring Boot® component. ++ +[source,sh] +---- + $ git clone https://github.com/odo-devfiles/springboot-ex +---- +Alternatively, you can pass in `--downloadSource` to `odo create` to have odo download a sample project. + +. Change the current directory to the component directory: ++ +[source,sh] +---- + $ cd +---- + . Create a component configuration using the `java-spring-boot` component-type named `mydockerspringboot`: + [source,sh] ---- - $ odo create java-spring-boot mydockerspringboot - Experimental mode is enabled, use at your own risk + $ odo create java-spring-boot mydockerspringboot + Experimental mode is enabled, use at your own risk - Validation - ✓ Checking devfile compatibility [26759ns] - ✓ Validating devfile component [75889ns] + Validation + ✓ Checking devfile compatibility [195728ns] + ✓ Creating a devfile component from registry: DefaultDevfileRegistry [170275ns] + ✓ Validating devfile component [281940ns] - Please use odo push command to create the component with source deployed + Please use odo push command to create the component with source deployed ---- . 
Create a URL in order to access the deployed component: @@ -297,7 +354,7 @@ In this example, we will be deploying the same Java Spring Boot® component we d [source,sh] ---- $ odo url create --port 8080 - ✓ URL local-mydockerspringboot-8080 created for component: mydockerspringboot with exposed port: 37833 + ✓ URL java-spring-boot-8080 created for component: java-spring-boot with exposed port: 59382 To apply the URL configuration changes, please use odo push ---- @@ -309,18 +366,26 @@ In order to access the docker application, exposed ports are required and automa [source,sh] ---- $ odo push - • Push devfile component mydockerspringboot ... - ✓ Pulling image maysunfaisal/springbootbuild [601ms] - - Applying URL configuration - ✓ URL 127.0.0.1:37833 created - ✓ Starting container for maysunfaisal/springbootbuild [550ms] - ✓ Pulling image maysunfaisal/springbootruntime [581ms] - - Applying URL configuration - ✓ URL 127.0.0.1:37833 created - ✓ Starting container for maysunfaisal/springbootruntime [505ms] - ✓ Push devfile component mydockerspringboot [2s] + + Validation + ✓ Validating the devfile [52685ns] + + Creating Docker resources for component java-spring-boot + ✓ Pulling image maysunfaisal/springbootbuild [879ms] + ✓ Starting container for maysunfaisal/springbootbuild [397ms] + ✓ Pulling image maysunfaisal/springbootruntime [1s] + ✓ URL 127.0.0.1:59382 created + ✓ Starting container for maysunfaisal/springbootruntime [380ms] + + Syncing to component java-spring-boot + ✓ Checking files for pushing [2ms] + ✓ Syncing files to the component [231ms] + + Executing devfile commands for component java-spring-boot + ✓ Executing devbuild command "/artifacts/bin/build-container-full.sh" [1m] + ✓ Executing devrun command "/artifacts/bin/start-server.sh" [1s] + + Pushing devfile component java-spring-boot ✓ Changes successfully pushed to component ---- + @@ -329,3 +394,20 @@ When odo deploys a devfile component, it pulls the images for each `dockercontai Each docker container 
that is deployed is labeled with the name of the odo component. + Docker volumes are created for the project source, and any other volumes defined in the devfile and mounted to the necessary containers. + +. View your deployed application using the generated URL: ++ +[source,sh] +---- + $ curl http://127.0.0.1:59382 +---- + +. To delete your deployed application: ++ +[source,sh] +---- + $ odo delete + ? Are you sure you want to delete the devfile component: java-spring-boot? Yes + ✓ Deleting devfile component java-spring-boot [139ms] + ✓ Successfully deleted component +---- \ No newline at end of file diff --git a/glide.lock b/glide.lock index a4ececa71be..2b314c42ea3 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: bfc9d5f9a6898813f88cb63176de326841ac3fb3a147f5a18a818cf465b25a70 -updated: 2020-06-17T08:12:19.260051505-04:00 +updated: 2020-06-25T23:34:06.153911702-04:00 imports: - name: cloud.google.com/go version: 8c41231e01b2085512d98153bcffb847ff9b4b9f @@ -335,7 +335,7 @@ imports: - specs-go - specs-go/v1 - name: github.com/openshift/api - version: 3e8f22fb0b56808d981b0a98cd2741d31368aec5 + version: 7192180f496aab1f7659d8660fc360498bab498b subpackages: - apps/v1 - authorization/v1 @@ -390,7 +390,7 @@ imports: - pkg/build/naming - pkg/oauth/oauthdiscovery - name: github.com/openshift/oc - version: d038424d6d4f1cc39ad586ac0d36dac3a7a37ceb + version: d89e458c3dff553f9a732b282830bfa9b4e0ab9b subpackages: - pkg/cli/login - pkg/helpers/errors @@ -402,7 +402,7 @@ imports: - pkg/helpers/term - pkg/helpers/tokencmd - name: github.com/operator-framework/operator-lifecycle-manager - version: 33671ebb9929e837d1e9f8104476bf278f203080 + version: 9f95e55cd49a63b85ab5ca5f087a9fa13af84a17 subpackages: - pkg/api/apis/operators - pkg/api/apis/operators/v1 @@ -477,7 +477,7 @@ imports: - assert - require - name: github.com/tektoncd/pipeline - version: 648cff3dfe1330c5035492437adee6bf1670d941 + version: 44f22a067b7576479f8d8b186e9521e9b7713be0 subpackages: - 
pkg/apis/config - pkg/apis/pipeline @@ -491,7 +491,7 @@ imports: - pkg/reconciler/pipeline/dag - pkg/substitution - name: github.com/tektoncd/triggers - version: 9baf220e6fb5f71e70968e01b1bfaa2a12a95cb6 + version: 9c451669426843783c6a012d9437b348cebc4729 subpackages: - pkg/apis/triggers/v1alpha1 - name: github.com/xeipuuv/gojsonpointer @@ -720,7 +720,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: 65afa901f75a71453aa6efcbbc033641003bb555 + version: b0635c302351c736bacc30717e901998c0b94c85 subpackages: - pkg/authentication/authenticator - pkg/authentication/request/x509 @@ -1008,7 +1008,7 @@ imports: - integer - trace - name: knative.dev/pkg - version: ffb929374a39cc0659b18100e9e2361f5106a81f + version: f1ee372577e156c0589dcaa8f166f84dce519f60 subpackages: - apis - apis/duck diff --git a/openshift-ci/build-root/Dockerfile b/openshift-ci/build-root/Dockerfile index 7215f03342d..2579f3be023 100644 --- a/openshift-ci/build-root/Dockerfile +++ b/openshift-ci/build-root/Dockerfile @@ -1,7 +1,6 @@ # Dockerfile to bootstrap build and test in openshift-ci -FROM registry.svc.ci.openshift.org/openshift/release:golang-1.13 +FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 RUN yum -y install make wget gcc git httpd-tools -RUN mkdir -p /tmp/secret \ No newline at end of file diff --git a/openshift-ci/build-root/multistage/Dockerfile b/openshift-ci/build-root/multistage/Dockerfile new file mode 100644 index 00000000000..747b3d8663e --- /dev/null +++ b/openshift-ci/build-root/multistage/Dockerfile @@ -0,0 +1,10 @@ +# Dockerfile to bootstrap build and test in openshift-ci + +FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 + +RUN yum -y install make wget gcc git httpd-tools + +# This is a temporary change to make sure that it should run on both template based as well as multi-stage test +# will remove this change once we completely migrate to multi-stage test infra +COPY 
openshift-ci/build-root/multistage/check-oc.sh . +RUN ./check-oc.sh diff --git a/openshift-ci/build-root/multistage/check-oc.sh b/openshift-ci/build-root/multistage/check-oc.sh new file mode 100755 index 00000000000..0f31fed6f00 --- /dev/null +++ b/openshift-ci/build-root/multistage/check-oc.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -x + +## Constants +OC_BINARY="./oc" + +# Copy oc binary to bin path +if [ -f $OC_BINARY ]; then + cp ./oc /usr/bin/oc +fi diff --git a/pkg/auth/login.go b/pkg/auth/login.go index 1d3e2e1dbbc..6f06c64bf3d 100644 --- a/pkg/auth/login.go +++ b/pkg/auth/login.go @@ -129,6 +129,7 @@ func filteredInformation(s []byte) []byte { s = bytes.Replace(s, []byte("new-project"), []byte("project create"), -1) s = bytes.Replace(s, []byte(""), []byte(""), -1) s = bytes.Replace(s, []byte("project "), []byte("project set "), -1) + s = bytes.Replace(s, []byte("odo projects"), []byte("odo project list"), -1) return s } diff --git a/pkg/catalog/catalog.go b/pkg/catalog/catalog.go index 9c605b70cf8..24ebe0ecca0 100644 --- a/pkg/catalog/catalog.go +++ b/pkg/catalog/catalog.go @@ -3,13 +3,17 @@ package catalog import ( "encoding/json" "fmt" + "net/url" "sort" "strings" + "sync" + + "github.com/openshift/odo/pkg/preference" imagev1 "github.com/openshift/api/image/v1" + "github.com/openshift/odo/pkg/devfile/adapters/common" "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/occlient" - "github.com/openshift/odo/pkg/preference" "github.com/openshift/odo/pkg/util" "github.com/pkg/errors" "gopkg.in/yaml.v2" @@ -21,31 +25,32 @@ const ( apiVersion = "odo.dev/v1alpha1" ) -// DevfileRegistries contains the links of all devfile registries -var DevfileRegistries = []string{ - "https://raw.githubusercontent.com/elsony/devfile-registry/master", - "https://che-devfile-registry.openshift.io/", -} - // GetDevfileRegistries gets devfile registries from preference file, // if registry name is specified return the specific registry, otherwise return all registries -func 
GetDevfileRegistries(registryName string) (map[string]string, error) { - devfileRegistries := make(map[string]string) +func GetDevfileRegistries(registryName string) (map[string]Registry, error) { + devfileRegistries := make(map[string]Registry) cfg, err := preference.New() if err != nil { return nil, err } + hasName := len(registryName) != 0 if cfg.OdoSettings.RegistryList != nil { for _, registry := range *cfg.OdoSettings.RegistryList { - if len(registryName) != 0 { + if hasName { if registryName == registry.Name { - devfileRegistries[registry.Name] = registry.URL + devfileRegistries[registry.Name] = Registry{ + Name: registry.Name, + URL: registry.URL, + } return devfileRegistries, nil } } else { - devfileRegistries[registry.Name] = registry.URL + devfileRegistries[registry.Name] = Registry{ + Name: registry.Name, + URL: registry.URL, + } } } } else { @@ -55,18 +60,62 @@ func GetDevfileRegistries(registryName string) (map[string]string, error) { return devfileRegistries, nil } -// GetDevfileIndex loads the devfile registry index.json -func GetDevfileIndex(devfileIndexLink string) ([]DevfileIndexEntry, error) { +// convertURL converts GitHub regular URL to GitHub raw URL, do nothing if the URL is not GitHub URL +// For example: +// GitHub regular URL: https://github.com/elsony/devfile-registry/tree/johnmcollier-crw +// GitHub raw URL: https://raw.githubusercontent.com/elsony/devfile-registry/johnmcollier-crw +func convertURL(URL string) (string, error) { + url, err := url.Parse(URL) + if err != nil { + return "", err + } + + if strings.Contains(url.Host, "github") && !strings.Contains(url.Host, "raw") { + // Convert path part of the URL + URLSlice := strings.Split(URL, "/") + if len(URLSlice) > 2 && URLSlice[len(URLSlice)-2] == "tree" { + // GitHub raw URL doesn't have "tree" structure in the URL, need to remove it + URL = strings.Replace(URL, "/tree", "", 1) + } else { + // Add "master" branch for GitHub raw URL by default if branch is not specified + URL = URL 
+ "/master" + } + + // Convert host part of the URL + if url.Host == "github.com" { + URL = strings.Replace(URL, "github.com", "raw.githubusercontent.com", 1) + } else { + URL = strings.Replace(URL, url.Host, "raw."+url.Host, 1) + } + } + + return URL, nil +} + +const indexPath = "/devfiles/index.json" + +// getDevfileIndexEntries retrieves the devfile entries associated with the specified registry +func getDevfileIndexEntries(registry Registry) ([]DevfileIndexEntry, error) { var devfileIndex []DevfileIndexEntry - jsonBytes, err := util.HTTPGetRequest(devfileIndexLink) + URL, err := convertURL(registry.URL) + if err != nil { + return nil, errors.Wrapf(err, "Unable to convert URL %s", registry.URL) + } + registry.URL = URL + indexLink := registry.URL + indexPath + jsonBytes, err := util.HTTPGetRequest(indexLink) if err != nil { - return nil, errors.Wrapf(err, "Unable to download the devfile index.json from %s", devfileIndexLink) + return nil, errors.Wrapf(err, "Unable to download the devfile index.json from %s", indexLink) } err = json.Unmarshal(jsonBytes, &devfileIndex) if err != nil { - return nil, errors.Wrapf(err, "Unable to unmarshal the devfile index.json from %s", devfileIndexLink) + return nil, errors.Wrapf(err, "Unable to unmarshal the devfile index.json from %s", indexLink) + } + + for i := range devfileIndex { + devfileIndex[i].Registry = registry } return devfileIndex, nil @@ -121,15 +170,12 @@ func IsDevfileComponentSupported(devfile Devfile) bool { } if !hasRunCommand { - hasRunCommand = strings.Contains(command.Name, "devRun") + hasRunCommand = strings.Contains(strings.ToLower(command.Name), string(common.DefaultDevfileRunCommand)) } - if !hasBuildCommand { - hasBuildCommand = strings.Contains(command.Name, "devBuild") - } } - if hasDockerImage && hasAlias && hasBuildCommand && hasRunCommand { + if hasDockerImage && hasAlias && hasRunCommand { return true } @@ -138,63 +184,81 @@ func IsDevfileComponentSupported(devfile Devfile) bool { // 
ListDevfileComponents lists all the available devfile components func ListDevfileComponents(registryName string) (DevfileComponentTypeList, error) { - var catalogDevfileList DevfileComponentTypeList + catalogDevfileList := &DevfileComponentTypeList{} var err error + // TODO: consider caching registry information for better performance since it should be fairly stable over time // Get devfile registries catalogDevfileList.DevfileRegistries, err = GetDevfileRegistries(registryName) if err != nil { - return catalogDevfileList, err + return *catalogDevfileList, err } if catalogDevfileList.DevfileRegistries == nil { - return catalogDevfileList, nil + return *catalogDevfileList, nil } - for registryName, registryURL := range catalogDevfileList.DevfileRegistries { + // first retrieve the indices for each registry, concurrently + registryIndices := make([]DevfileIndexEntry, 0, 20) + devfileIndicesMutex := &sync.Mutex{} + retrieveRegistryIndices := util.NewConcurrentTasks(len(catalogDevfileList.DevfileRegistries)) + for _, reg := range catalogDevfileList.DevfileRegistries { // Load the devfile registry index.json - devfileIndexLink := registryURL + "/devfiles/index.json" - devfileIndex, err := GetDevfileIndex(devfileIndexLink) - if err != nil { - log.Warningf("Registry %s is not set up properly with error: %v", registryName, err) - break - } + registry := reg // needed to prevent the lambda from capturing the value + retrieveRegistryIndices.Add(util.ConcurrentTask{ToRun: func(errChannel chan error) { + indexEntries, err := getDevfileIndexEntries(registry) + if err != nil { + log.Warningf("Registry %s is not set up properly with error: %v", registry.Name, err) + return + } - // 1. Load devfiles that indexed in devfile registry index.json - // 2. Populate devfile components with devfile data - // 3. 
Form devfile component list - for _, devfileIndexEntry := range devfileIndex { - devfileIndexEntryLink := devfileIndexEntry.Links.Link - - // Load the devfile - devfileLink := registryURL + devfileIndexEntryLink - // TODO: We send http get resquest in this function multiple times - // since devfile registry uses different links to host different devfiles, - // this can reduce the performance especially when we load devfiles from - // big registry. We may need to rethink and optimize this in the future - devfile, err := GetDevfile(devfileLink) + devfileIndicesMutex.Lock() + registryIndices = append(registryIndices, indexEntries...) + devfileIndicesMutex.Unlock() + }}) + } + if err := retrieveRegistryIndices.Run(); err != nil { + return *catalogDevfileList, err + } + + // 1. Load each devfile concurrently from the previously retrieved devfile index entries + // 2. Populate devfile components with devfile data + // 3. Add devfile component types to the catalog devfile list + retrieveDevfiles := util.NewConcurrentTasks(len(registryIndices)) + devfileMutex := &sync.Mutex{} + for _, index := range registryIndices { + // Load the devfile + devfileIndex := index // needed to prevent the lambda from capturing the value + link := devfileIndex.Registry.URL + devfileIndex.Links.Link + retrieveDevfiles.Add(util.ConcurrentTask{ToRun: func(errChannel chan error) { + + // Note that this issues an HTTP get per devfile entry in the catalog, while doing it concurrently instead of + // sequentially improves the performance, caching that information would improve the performance even more + devfile, err := GetDevfile(link) if err != nil { - log.Warningf("Registry %s is not set up properly with error: %v", registryName, err) - break + log.Warningf("Registry %s is not set up properly with error: %v", devfileIndex.Registry.Name, err) + return } // Populate devfile component with devfile data and form devfile component list catalogDevfile := DevfileComponentType{ Name: 
strings.TrimSuffix(devfile.MetaData.GenerateName, "-"), - DisplayName: devfileIndexEntry.DisplayName, - Description: devfileIndexEntry.Description, - Link: devfileIndexEntryLink, + DisplayName: devfileIndex.DisplayName, + Description: devfileIndex.Description, + Link: devfileIndex.Links.Link, Support: IsDevfileComponentSupported(devfile), - Registry: Registry{ - Name: registryName, - URL: registryURL, - }, + Registry: devfileIndex.Registry, } + devfileMutex.Lock() catalogDevfileList.Items = append(catalogDevfileList.Items, catalogDevfile) - } + devfileMutex.Unlock() + }}) + } + if err := retrieveDevfiles.Run(); err != nil { + return *catalogDevfileList, err } - return catalogDevfileList, nil + return *catalogDevfileList, nil } // ListComponents lists all the available component types diff --git a/pkg/catalog/catalog_test.go b/pkg/catalog/catalog_test.go index e201f2685ef..bad8bdac9a4 100644 --- a/pkg/catalog/catalog_test.go +++ b/pkg/catalog/catalog_test.go @@ -187,7 +187,7 @@ OdoSettings: - Name: CheDevfileRegistry URL: https://che-devfile-registry.openshift.io/ - Name: DefaultDevfileRegistry - URL: https://raw.githubusercontent.com/elsony/devfile-registry/master`, + URL: https://github.com/elsony/devfile-registry`, )) if err != nil { t.Error(err) @@ -199,21 +199,30 @@ OdoSettings: tests := []struct { name string registryName string - want map[string]string + want map[string]Registry }{ { name: "Case 1: Test get all devfile registries", registryName: "", - want: map[string]string{ - "CheDevfileRegistry": "https://che-devfile-registry.openshift.io/", - "DefaultDevfileRegistry": "https://raw.githubusercontent.com/elsony/devfile-registry/master", + want: map[string]Registry{ + "CheDevfileRegistry": { + Name: "CheDevfileRegistry", + URL: "https://che-devfile-registry.openshift.io/", + }, + "DefaultDevfileRegistry": { + Name: "DefaultDevfileRegistry", + URL: "https://github.com/elsony/devfile-registry", + }, }, }, { name: "Case 2: Test get specific devfile registry", 
registryName: "CheDevfileRegistry", - want: map[string]string{ - "CheDevfileRegistry": "https://che-devfile-registry.openshift.io/", + want: map[string]Registry{ + "CheDevfileRegistry": { + Name: "CheDevfileRegistry", + URL: "https://che-devfile-registry.openshift.io/", + }, }, }, } @@ -232,7 +241,7 @@ OdoSettings: } } -func TestGetDevfileIndex(t *testing.T) { +func TestGetDevfileIndexEntries(t *testing.T) { // Start a local HTTP server server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { // Send response to be tested @@ -263,14 +272,15 @@ func TestGetDevfileIndex(t *testing.T) { // Close the server when test finishes defer server.Close() + const registryName = "some registry" tests := []struct { - name string - devfileIndexLink string - want []DevfileIndexEntry + name string + registry Registry + want []DevfileIndexEntry }{ { - name: "Test NodeJS devfile index", - devfileIndexLink: server.URL, + name: "Test NodeJS devfile index", + registry: Registry{Name: registryName, URL: server.URL}, want: []DevfileIndexEntry{ { DisplayName: "NodeJS Angular Web Application", @@ -282,6 +292,10 @@ func TestGetDevfileIndex(t *testing.T) { }, Icon: "/images/angular.svg", GlobalMemoryLimit: "2686Mi", + Registry: Registry{ + Name: registryName, + URL: server.URL, + }, Links: struct { Link string `json:"self"` }{ @@ -294,7 +308,7 @@ func TestGetDevfileIndex(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := GetDevfileIndex(tt.devfileIndexLink) + got, err := getDevfileIndexEntries(tt.registry) if !reflect.DeepEqual(got, tt.want) { t.Errorf("Got: %v, want: %v", got, tt.want) @@ -562,3 +576,40 @@ func MockImageStream() *imagev1.ImageStream { return imageStream } + +func TestConvertURL(t *testing.T) { + tests := []struct { + name string + URL string + wantURL string + }{ + { + name: "Case 1: GitHub regular URL without specifying branch", + URL: "https://github.com/GeekArthur/registry", + wantURL: 
"https://raw.githubusercontent.com/GeekArthur/registry/master", + }, + { + name: "Case 2: GitHub regular URL with master branch specified", + URL: "https://github.ibm.com/Jingfu-J-Wang/registry/tree/master", + wantURL: "https://raw.github.ibm.com/Jingfu-J-Wang/registry/master", + }, + { + name: "Case 3: GitHub regular URL with non-master branch specified", + URL: "https://github.com/elsony/devfile-registry/tree/johnmcollier-crw", + wantURL: "https://raw.githubusercontent.com/elsony/devfile-registry/johnmcollier-crw", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotURL, err := convertURL(tt.URL) + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(gotURL, tt.wantURL) { + t.Errorf("Got url: %s, want URL: %s", gotURL, tt.wantURL) + } + }) + } +} diff --git a/pkg/catalog/types.go b/pkg/catalog/types.go index fd7eef68916..d8501b75ac7 100644 --- a/pkg/catalog/types.go +++ b/pkg/catalog/types.go @@ -35,6 +35,7 @@ type DevfileIndexEntry struct { Tags []string `json:"tags"` Icon string `json:"icon"` GlobalMemoryLimit string `json:"globalMemoryLimit"` + Registry Registry `json:"registry"` Links struct { Link string `json:"self"` } `json:"links"` @@ -72,7 +73,7 @@ type ComponentTypeList struct { // DevfileComponentTypeList lists all the DevfileComponentType's type DevfileComponentTypeList struct { - DevfileRegistries map[string]string + DevfileRegistries map[string]Registry Items []DevfileComponentType } diff --git a/pkg/component/component.go b/pkg/component/component.go index ba0bc9f5405..90d66764f69 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -7,7 +7,6 @@ import ( "io" "os" "path/filepath" - "strconv" "strings" "github.com/openshift/odo/pkg/devfile/adapters/common" @@ -695,7 +694,7 @@ func PushLocal(client *occlient.Client, componentName string, applicationName st compInfo := common.ComponentInfo{ PodName: pod.Name, } - err = exec.ExecuteCommand(client, compInfo, cmdArr, show) + err = 
exec.ExecuteCommand(client, compInfo, cmdArr, show, nil, nil) if err != nil { s.End(false) @@ -898,17 +897,7 @@ func GetComponentFromConfig(localConfig *config.LocalConfigInfo) (Component, err urls := localConfig.GetURL() if len(urls) > 0 { - // We will clean up the existing value of ports and re-populate it so that we don't panic in `odo describe` and don't show inconsistent info - // This will also help in the case where there are more URLs created than the number of ports exposed by a component #2776 - oldPortsProtocol, err := getPortsProtocolMapping(component.Spec.Ports) - if err != nil { - return Component{}, err - } - component.Spec.Ports = []string{} - for _, url := range urls { - port := strconv.Itoa(url.Port) - component.Spec.Ports = append(component.Spec.Ports, fmt.Sprintf("%s/%s", port, oldPortsProtocol[port])) component.Spec.URL = append(component.Spec.URL, url.Name) } } @@ -925,22 +914,6 @@ func GetComponentFromConfig(localConfig *config.LocalConfigInfo) (Component, err return Component{}, nil } -// This function returns a mapping of port and protocol. -// So for a value of ports {"8080/TCP", "45/UDP"} it will return a map {"8080": -// "TCP", "45": "UDP"} -func getPortsProtocolMapping(ports []string) (map[string]string, error) { - oldPortsProtocol := make(map[string]string, len(ports)) - for _, port := range ports { - portProtocol := strings.Split(port, "/") - if len(portProtocol) != 2 { - // this will be the case if value of a port is something like 8080/TCP/something-else or simply 8080 - return nil, errors.New("invalid mapping. 
Please update the component configuration") - } - oldPortsProtocol[portProtocol[0]] = portProtocol[1] - } - return oldPortsProtocol, nil -} - // ListIfPathGiven lists all available component in given path directory func ListIfPathGiven(client *occlient.Client, paths []string) (ComponentList, error) { var components []Component diff --git a/pkg/debug/info.go b/pkg/debug/info.go index 510425aea8f..0b830ad0ae3 100644 --- a/pkg/debug/info.go +++ b/pkg/debug/info.go @@ -11,7 +11,6 @@ import ( "strings" "syscall" - "github.com/openshift/odo/pkg/occlient" "github.com/openshift/odo/pkg/testingutil/filesystem" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" @@ -24,18 +23,23 @@ type OdoDebugFile struct { } type OdoDebugFileSpec struct { - App string `json:"app"` + App string `json:"app,omitempty"` DebugProcessID int `json:"debugProcessID"` RemotePort int `json:"remotePort"` LocalPort int `json:"localPort"` } // GetDebugInfoFilePath gets the file path of the debug info file -func GetDebugInfoFilePath(client *occlient.Client, componentName, appName string) string { +func GetDebugInfoFilePath(componentName, appName string, projectName string) string { tempDir := os.TempDir() debugFileSuffix := "odo-debug.json" - s := []string{client.Namespace, appName, componentName, debugFileSuffix} - debugFileName := strings.Join(s, "-") + var arr []string + if appName == "" { + arr = []string{projectName, componentName, debugFileSuffix} + } else { + arr = []string{projectName, appName, componentName, debugFileSuffix} + } + debugFileName := strings.Join(arr, "-") return filepath.Join(tempDir, debugFileName) } @@ -66,7 +70,7 @@ func createDebugInfoFile(f *DefaultPortForwarder, portPair string, fs filesystem }, ObjectMeta: metav1.ObjectMeta{ Name: f.componentName, - Namespace: f.client.Namespace, + Namespace: f.projectName, }, Spec: OdoDebugFileSpec{ App: f.appName, @@ -81,7 +85,7 @@ func createDebugInfoFile(f *DefaultPortForwarder, portPair string, fs filesystem } // writes the 
data to the debug info file - file, err := fs.OpenFile(GetDebugInfoFilePath(f.client, f.componentName, f.appName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + file, err := fs.OpenFile(GetDebugInfoFilePath(f.componentName, f.appName, f.projectName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } @@ -102,7 +106,7 @@ func GetDebugInfo(f *DefaultPortForwarder) (OdoDebugFile, bool) { // returns true if debugging is running else false func getDebugInfo(f *DefaultPortForwarder, fs filesystem.Filesystem) (OdoDebugFile, bool) { // gets the debug info file path and reads/unmarshals it - debugInfoFilePath := GetDebugInfoFilePath(f.client, f.componentName, f.appName) + debugInfoFilePath := GetDebugInfoFilePath(f.componentName, f.appName, f.projectName) readFile, err := fs.ReadFile(debugInfoFilePath) if err != nil { klog.V(4).Infof("the debug %v is not present", debugInfoFilePath) diff --git a/pkg/debug/info_test.go b/pkg/debug/info_test.go index 7f2dce1b0e0..db728084555 100644 --- a/pkg/debug/info_test.go +++ b/pkg/debug/info_test.go @@ -7,7 +7,6 @@ import ( "reflect" "testing" - "github.com/openshift/odo/pkg/occlient" "github.com/openshift/odo/pkg/testingutil" "github.com/openshift/odo/pkg/testingutil/filesystem" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,6 +58,7 @@ func Test_createDebugInfoFile(t *testing.T) { defaultPortForwarder: &DefaultPortForwarder{ componentName: "nodejs-ex", appName: "app", + projectName: "testing-1", }, portPair: "5858:9001", fs: fs, @@ -88,6 +88,7 @@ func Test_createDebugInfoFile(t *testing.T) { defaultPortForwarder: &DefaultPortForwarder{ componentName: "nodejs-ex", appName: "app", + projectName: "testing-1", }, portPair: "5758:9004", fs: fs, @@ -115,12 +116,7 @@ func Test_createDebugInfoFile(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Fake the client with the appropriate arguments - client, _ := occlient.FakeNew() - client.Namespace = "testing-1" - 
tt.args.defaultPortForwarder.client = client - - debugFilePath := GetDebugInfoFilePath(client, tt.args.defaultPortForwarder.componentName, tt.args.defaultPortForwarder.appName) + debugFilePath := GetDebugInfoFilePath(tt.args.defaultPortForwarder.componentName, tt.args.defaultPortForwarder.appName, tt.args.defaultPortForwarder.projectName) // create a already existing file if tt.alreadyExistFile { _, err := testingutil.MkFileWithContent(debugFilePath, "blah", fs) @@ -177,6 +173,7 @@ func Test_getDebugInfo(t *testing.T) { defaultPortForwarder: &DefaultPortForwarder{ appName: "app", componentName: "nodejs-ex", + projectName: "testing-1", }, fs: fs, }, @@ -222,6 +219,7 @@ func Test_getDebugInfo(t *testing.T) { defaultPortForwarder: &DefaultPortForwarder{ appName: "app", componentName: "nodejs-ex", + projectName: "testing-1", }, fs: fs, }, @@ -237,6 +235,7 @@ func Test_getDebugInfo(t *testing.T) { defaultPortForwarder: &DefaultPortForwarder{ appName: "app", componentName: "nodejs-ex", + projectName: "testing-1", }, fs: fs, }, @@ -267,6 +266,7 @@ func Test_getDebugInfo(t *testing.T) { defaultPortForwarder: &DefaultPortForwarder{ appName: "app", componentName: "nodejs-ex", + projectName: "testing-1", }, fs: fs, }, @@ -295,11 +295,6 @@ func Test_getDebugInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Fake the client with the appropriate arguments - client, _ := occlient.FakeNew() - client.Namespace = "testing-1" - tt.args.defaultPortForwarder.client = client - freePort, err := util.HttpGetFreePort() if err != nil { t.Errorf("error occured while getting a free port, cause: %v", err) @@ -313,7 +308,7 @@ func Test_getDebugInfo(t *testing.T) { tt.wantDebugFile.Spec.LocalPort = freePort } - odoDebugFilePath := GetDebugInfoFilePath(tt.args.defaultPortForwarder.client, tt.args.defaultPortForwarder.componentName, tt.args.defaultPortForwarder.appName) + odoDebugFilePath := GetDebugInfoFilePath(tt.args.defaultPortForwarder.componentName, 
tt.args.defaultPortForwarder.appName, tt.args.defaultPortForwarder.projectName) if tt.fileExists { fakeString, err := fakeOdoDebugFileString(tt.readDebugFile.TypeMeta, tt.readDebugFile.Spec.DebugProcessID, diff --git a/pkg/debug/portforward.go b/pkg/debug/portforward.go index 10df7ae47a7..e76fc37addb 100644 --- a/pkg/debug/portforward.go +++ b/pkg/debug/portforward.go @@ -1,16 +1,14 @@ package debug import ( + "github.com/openshift/odo/pkg/kclient" "github.com/openshift/odo/pkg/occlient" - - componentlabels "github.com/openshift/odo/pkg/component/labels" + "k8s.io/client-go/rest" "fmt" "net/http" "github.com/openshift/odo/pkg/log" - "github.com/openshift/odo/pkg/util" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8sgenclioptions "k8s.io/cli-runtime/pkg/genericclioptions" @@ -20,18 +18,22 @@ import ( // DefaultPortForwarder implements the SPDY based port forwarder type DefaultPortForwarder struct { - client *occlient.Client + client *occlient.Client + kClient *kclient.Client k8sgenclioptions.IOStreams componentName string appName string + projectName string } -func NewDefaultPortForwarder(componentName, appName string, client *occlient.Client, streams k8sgenclioptions.IOStreams) *DefaultPortForwarder { +func NewDefaultPortForwarder(componentName, appName string, projectName string, client *occlient.Client, kClient *kclient.Client, streams k8sgenclioptions.IOStreams) *DefaultPortForwarder { return &DefaultPortForwarder{ client: client, + kClient: kClient, IOStreams: streams, componentName: componentName, appName: appName, + projectName: projectName, } } @@ -39,15 +41,31 @@ func NewDefaultPortForwarder(componentName, appName string, client *occlient.Cli // portPair is a pair of port in format "localPort:RemotePort" that is to be forwarded // stop Chan is used to stop port forwarding // ready Chan is used to signal failure to the channel receiver -func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan chan struct{}) error { - conf, 
err := f.client.KubeConfig.ClientConfig() - if err != nil { - return err - } +func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan chan struct{}, isExperimental bool) error { + var pod *corev1.Pod + var conf *rest.Config + var err error - pod, err := f.getPodUsingComponentName() - if err != nil { - return err + if f.kClient != nil && isExperimental { + conf, err = f.kClient.KubeConfig.ClientConfig() + if err != nil { + return err + } + + pod, err = f.kClient.GetPodUsingComponentName(f.componentName) + if err != nil { + return err + } + } else { + conf, err = f.client.KubeConfig.ClientConfig() + if err != nil { + return err + } + + pod, err = f.client.GetPodUsingComponentName(f.componentName, f.appName) + if err != nil { + return err + } } if pod.Status.Phase != corev1.PodRunning { @@ -58,7 +76,14 @@ func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan if err != nil { return err } - req := f.client.BuildPortForwardReq(pod.Name) + + var req *rest.Request + if f.kClient != nil && isExperimental { + req = f.kClient.GeneratePortForwardReq(pod.Name) + } else { + req = f.client.BuildPortForwardReq(pod.Name) + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) fw, err := portforward.New(dialer, []string{portPair}, stopChan, readyChan, f.Out, f.ErrOut) if err != nil { @@ -67,16 +92,3 @@ func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan log.Info("Started port forwarding at ports -", portPair) return fw.ForwardPorts() } - -func (f *DefaultPortForwarder) getPodUsingComponentName() (*corev1.Pod, error) { - componentLabels := componentlabels.GetLabels(f.componentName, f.appName, false) - componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := f.client.GetOneDeploymentConfigFromSelector(componentSelector) - if err != nil { - return nil, errors.Wrap(err, "unable to get deployment for component") - } - // Find Pod for component - 
podSelector := fmt.Sprintf("deploymentconfig=%s", dc.Name) - - return f.client.GetOnePodFromSelector(podSelector) -} diff --git a/pkg/devfile/adapters/common/command.go b/pkg/devfile/adapters/common/command.go index 77966faac6f..017304115bb 100644 --- a/pkg/devfile/adapters/common/command.go +++ b/pkg/devfile/adapters/common/command.go @@ -7,102 +7,115 @@ import ( "github.com/openshift/odo/pkg/devfile/parser/data" "github.com/openshift/odo/pkg/devfile/parser/data/common" "k8s.io/klog" - - "github.com/pkg/errors" ) // GetCommand iterates through the devfile commands and returns the associated devfile command -func getCommand(data data.DevfileData, commandName string, required bool) (supportedCommand common.DevfileCommand, err error) { - for _, command := range data.GetCommands() { - if command.Name == commandName { - - // Get the supported actions - supportedCommandActions, err := getSupportedCommandActions(data, command) - - // None of the actions are supported so the command cannot be run - if len(supportedCommandActions) == 0 { - return supportedCommand, errors.Wrapf(err, "\nThe command \"%v\" was found but its actions are not supported", commandName) - } else if err != nil { - klog.Warning(errors.Wrapf(err, "The command \"%v\" was found but some of its actions are not supported", commandName)) +func getCommand(data data.DevfileData, commandName string, groupType common.DevfileCommandGroupType) (supportedCommand common.DevfileCommand, err error) { + + commands := data.GetCommands() + + for _, command := range commands { + + // validate command + err = validateCommand(data, command) + + if err != nil { + return common.DevfileCommand{}, err + } + + // if command is specified via flags, it has the highest priority + // search through all commands to find the specified command name + // if not found fallback to error. 
+ if commandName != "" { + + // Update Group only custom commands (specified by odo flags) + command = updateGroupforCommand(groupType, command) + + if command.Exec.Id == commandName { + + // we have found the command with name, its groupType Should match to the flag + // e.g --build-command "mybuild" + // exec: + // id: mybuild + // group: + // kind: build + if command.Exec.Group.Kind != groupType { + return supportedCommand, fmt.Errorf("command group mismatched, command %s is of group %v in devfile.yaml", commandName, command.Exec.Group.Kind) + } + supportedCommand = command + return supportedCommand, nil } + continue + } - // The command is supported, use it - supportedCommand.Name = command.Name - supportedCommand.Actions = supportedCommandActions - supportedCommand.Attributes = command.Attributes + // if no command specified via flag, default command has the highest priority + // We need to scan all the commands to find default command + // exec.Group is a pointer, to avoid null pointer + if command.Exec.Group != nil && command.Exec.Group.Kind == groupType && command.Exec.Group.IsDefault { + supportedCommand = command return supportedCommand, nil } } - // The command was not found - msg := fmt.Sprintf("The command \"%v\" was not found in the devfile", commandName) - if required { - // Not found and required, return an error - err = fmt.Errorf(msg) - } else { - // Not found and optional, so just log it - klog.V(3).Info(msg) - } - - return -} + if commandName == "" { + // if default command is not found return the first command found for the matching type. + for _, command := range commands { -// getSupportedCommandActions returns the supported actions for a given command and any errors -// If some actions are supported and others have errors both the supported actions and an aggregated error will be returned. 
-func getSupportedCommandActions(data data.DevfileData, command common.DevfileCommand) (supportedCommandActions []common.DevfileCommandAction, err error) { - klog.V(3).Infof("Validating actions for command: %v ", command.Name) - - problemMsg := "" - for i, action := range command.Actions { - // Check if the command action is of type exec - err := validateAction(data, action) - if err == nil { - klog.V(3).Infof("Action %d maps to component %v", i+1, *action.Component) - supportedCommandActions = append(supportedCommandActions, action) - } else { - problemMsg += fmt.Sprintf("Problem with command \"%v\" action #%d: %v", command.Name, i+1, err) + if command.Exec.Group != nil && command.Exec.Group.Kind == groupType { + supportedCommand = command + return supportedCommand, nil + } } } - if len(problemMsg) > 0 { - err = fmt.Errorf(problemMsg) + // if any command specified via flag is not found in devfile then it is an error. + if commandName != "" { + err = fmt.Errorf("the command \"%v\" is not found in the devfile", commandName) + } else { + msg := fmt.Sprintf("the command type \"%v\" is not found in the devfile", groupType) + // if run command is not found in devfile then it is an error + if groupType == common.RunCommandGroupType { + err = fmt.Errorf(msg) + } else { + klog.V(4).Info(msg) + } } return } -// validateAction validates the given action -// 1. action has to be of type exec +// validateCommand validates the given command +// 1. command has to be of type exec // 2. component should be present -// 3. command should be present -func validateAction(data data.DevfileData, action common.DevfileCommandAction) (err error) { +// 4. 
command must have group +func validateCommand(data data.DevfileData, command common.DevfileCommand) (err error) { // type must be exec - if *action.Type != common.DevfileCommandTypeExec { - return fmt.Errorf("Actions must be of type \"exec\"") + if command.Exec == nil { + return fmt.Errorf("command must be of type \"exec\"") } // component must be specified - if action.Component == nil || *action.Component == "" { - return fmt.Errorf("Actions must reference a component") + if command.Exec.Component == "" { + return fmt.Errorf("exec commands must reference a component") } // must specify a command - if action.Command == nil || *action.Command == "" { - return fmt.Errorf("Actions must have a command") + if command.Exec.CommandLine == "" { + return fmt.Errorf("exec commands must have a command") } // must map to a supported component components := GetSupportedComponents(data) - isActionValid := false + isComponentValid := false for _, component := range components { - if *action.Component == *component.Alias && isComponentSupported(component) { - isActionValid = true + if command.Exec.Component == component.Container.Name { + isComponentValid = true } } - if !isActionValid { - return fmt.Errorf("The action does not map to a supported component") + if !isComponentValid { + return fmt.Errorf("the command does not map to a supported component") } return @@ -110,37 +123,34 @@ func validateAction(data data.DevfileData, action common.DevfileCommandAction) ( // GetInitCommand iterates through the components in the devfile and returns the init command func GetInitCommand(data data.DevfileData, devfileInitCmd string) (initCommand common.DevfileCommand, err error) { - if devfileInitCmd != "" { - // a init command was specified so if it is not found then it is an error - return getCommand(data, devfileInitCmd, true) - } - // a init command was not specified so if it is not found then it is not an error - return getCommand(data, string(DefaultDevfileInitCommand), false) + + 
return getCommand(data, devfileInitCmd, common.InitCommandGroupType) } // GetBuildCommand iterates through the components in the devfile and returns the build command func GetBuildCommand(data data.DevfileData, devfileBuildCmd string) (buildCommand common.DevfileCommand, err error) { - if devfileBuildCmd != "" { - // a build command was specified so if it is not found then it is an error - return getCommand(data, devfileBuildCmd, true) - } - // a build command was not specified so if it is not found then it is not an error - return getCommand(data, string(DefaultDevfileBuildCommand), false) + + return getCommand(data, devfileBuildCmd, common.BuildCommandGroupType) +} + +// GetDebugCommand iterates through the components in the devfile and returns the debug command +func GetDebugCommand(data data.DevfileData, devfileDebugCmd string) (debugCommand common.DevfileCommand, err error) { + return getCommand(data, devfileDebugCmd, common.DebugCommandGroupType) } // GetRunCommand iterates through the components in the devfile and returns the run command func GetRunCommand(data data.DevfileData, devfileRunCmd string) (runCommand common.DevfileCommand, err error) { - if devfileRunCmd != "" { - return getCommand(data, devfileRunCmd, true) - } - return getCommand(data, string(DefaultDevfileRunCommand), true) + + return getCommand(data, devfileRunCmd, common.RunCommandGroupType) } // ValidateAndGetPushDevfileCommands validates the build and the run command, // if provided through odo push or else checks the devfile for devBuild and devRun. // It returns the build and run commands if its validated successfully, error otherwise. 
-func ValidateAndGetPushDevfileCommands(data data.DevfileData, devfileInitCmd, devfileBuildCmd, devfileRunCmd string) (pushDevfileCommands []common.DevfileCommand, err error) { +func ValidateAndGetPushDevfileCommands(data data.DevfileData, devfileInitCmd, devfileBuildCmd, devfileRunCmd string) (commandMap PushCommandsMap, err error) { var emptyCommand common.DevfileCommand + commandMap = NewPushCommandMap() + isInitCommandValid, isBuildCommandValid, isRunCommandValid := false, false, false initCommand, initCmdErr := GetInitCommand(data, devfileInitCmd) @@ -149,11 +159,11 @@ func ValidateAndGetPushDevfileCommands(data data.DevfileData, devfileInitCmd, de if isInitCmdEmpty && initCmdErr == nil { // If there was no init command specified through odo push and no default init command in the devfile, default validate to true since the init command is optional isInitCommandValid = true - klog.V(3).Infof("No init command was provided") + klog.V(4).Infof("No init command was provided") } else if !isInitCmdEmpty && initCmdErr == nil { isInitCommandValid = true - pushDevfileCommands = append(pushDevfileCommands, initCommand) - klog.V(3).Infof("Init command: %v", initCommand.Name) + commandMap[common.InitCommandGroupType] = initCommand + klog.V(4).Infof("Init command: %v", initCommand.Exec.Id) } buildCommand, buildCmdErr := GetBuildCommand(data, devfileBuildCmd) @@ -162,18 +172,18 @@ func ValidateAndGetPushDevfileCommands(data data.DevfileData, devfileInitCmd, de if isBuildCmdEmpty && buildCmdErr == nil { // If there was no build command specified through odo push and no default build command in the devfile, default validate to true since the build command is optional isBuildCommandValid = true - klog.V(3).Infof("No build command was provided") + klog.V(4).Infof("No build command was provided") } else if !reflect.DeepEqual(emptyCommand, buildCommand) && buildCmdErr == nil { isBuildCommandValid = true - pushDevfileCommands = append(pushDevfileCommands, buildCommand) - 
klog.V(3).Infof("Build command: %v", buildCommand.Name) + commandMap[common.BuildCommandGroupType] = buildCommand + klog.V(4).Infof("Build command: %v", buildCommand.Exec.Id) } runCommand, runCmdErr := GetRunCommand(data, devfileRunCmd) if runCmdErr == nil && !reflect.DeepEqual(emptyCommand, runCommand) { - pushDevfileCommands = append(pushDevfileCommands, runCommand) isRunCommandValid = true - klog.V(3).Infof("Run command: %v", runCommand.Name) + commandMap[common.RunCommandGroupType] = runCommand + klog.V(4).Infof("Run command: %v", runCommand.Exec.Id) } // If either command had a problem, return an empty list of commands and an error @@ -188,8 +198,41 @@ func ValidateAndGetPushDevfileCommands(data data.DevfileData, devfileInitCmd, de if runCmdErr != nil { commandErrors += fmt.Sprintf(runCmdErr.Error(), "\n") } - return []common.DevfileCommand{}, fmt.Errorf(commandErrors) + return commandMap, fmt.Errorf(commandErrors) + } + + return commandMap, nil +} + +// Need to update group on custom commands specified by odo flags +func updateGroupforCommand(groupType common.DevfileCommandGroupType, command common.DevfileCommand) common.DevfileCommand { + // Update Group only for exec commands + // Update Group only when Group is not nil, devfile v2 might contain group for custom commands. 
+ if command.Exec != nil && command.Exec.Group == nil { + command.Exec.Group = &common.Group{Kind: groupType} + return command + } + return command +} + +// ValidateAndGetDebugDevfileCommands validates the debug command +func ValidateAndGetDebugDevfileCommands(data data.DevfileData, devfileDebugCmd string) (pushDebugCommand common.DevfileCommand, err error) { + var emptyCommand common.DevfileCommand + + isDebugCommandValid := false + debugCommand, debugCmdErr := GetDebugCommand(data, devfileDebugCmd) + if debugCmdErr == nil && !reflect.DeepEqual(emptyCommand, debugCommand) { + isDebugCommandValid = true + klog.V(4).Infof("Debug command: %v", debugCommand.Exec.Id) + } + + if !isDebugCommandValid { + commandErrors := "" + if debugCmdErr != nil { + commandErrors += debugCmdErr.Error() + } + return common.DevfileCommand{}, fmt.Errorf(commandErrors) } - return pushDevfileCommands, nil + return debugCommand, nil } diff --git a/pkg/devfile/adapters/common/command_test.go b/pkg/devfile/adapters/common/command_test.go index c6efc7052f3..dc01835aaa7 100644 --- a/pkg/devfile/adapters/common/command_test.go +++ b/pkg/devfile/adapters/common/command_test.go @@ -10,214 +10,166 @@ import ( "github.com/openshift/odo/pkg/testingutil" ) +var buildGroup = common.BuildCommandGroupType +var runGroup = common.RunCommandGroupType +var initGroup = common.InitCommandGroupType + func TestGetCommand(t *testing.T) { commands := [...]string{"ls -la", "pwd"} components := [...]string{"alias1", "alias2"} invalidComponent := "garbagealias" workDir := [...]string{"/", "/root"} - validCommandType := common.DevfileCommandTypeExec - invalidCommandType := common.DevfileCommandType("garbage") + emptyString := "" tests := []struct { - name string - requestedCommands []string - commandActions []common.DevfileCommandAction - isCommandRequired []bool - wantErr bool + name string + requestedType []common.DevfileCommandGroupType + execCommands []common.Exec + groupType []common.DevfileCommandGroupType + 
reqCommandName string + retCommandName string + wantErr bool }{ { - name: "Case 1: Valid devfile", - requestedCommands: []string{"devbuild", "devrun"}, - commandActions: []common.DevfileCommandAction{ - { - Command: &commands[0], - Component: &components[0], - Workdir: &workDir[0], - Type: &validCommandType, - }, + name: "Case 1: Valid devfile", + execCommands: []versionsCommon.Exec{ + getExecCommand("", buildGroup), + getExecCommand("", runGroup), }, - isCommandRequired: []bool{false, false, true}, - wantErr: false, + requestedType: []common.DevfileCommandGroupType{buildGroup, runGroup}, + wantErr: false, }, { - name: "Case 2: Valid devfile with devinit and devbuild", - requestedCommands: []string{"devinit", "devbuild", "devrun"}, - commandActions: []versionsCommon.DevfileCommandAction{ - { - Command: &commands[0], - Component: &components[0], - Workdir: &workDir[0], - Type: &validCommandType, - }, + name: "Case 2: Valid devfile with devinit and devbuild", + execCommands: []versionsCommon.Exec{ + getExecCommand("", buildGroup), + getExecCommand("", runGroup), }, - isCommandRequired: []bool{false, false, true}, - wantErr: false, + requestedType: []common.DevfileCommandGroupType{initGroup, buildGroup, runGroup}, + wantErr: false, }, { - name: "Case 3: Valid devfile with devinit and devrun", - requestedCommands: []string{"devinit", "devrun"}, - commandActions: []versionsCommon.DevfileCommandAction{ - { - Command: &commands[0], - Component: &components[0], - Workdir: &workDir[0], - Type: &validCommandType, - }, + name: "Case 3: Valid devfile with devinit and devrun", + execCommands: []versionsCommon.Exec{ + getExecCommand("", initGroup), + getExecCommand("", runGroup), }, - isCommandRequired: []bool{false, false, true}, - wantErr: false, + requestedType: []common.DevfileCommandGroupType{initGroup, runGroup}, + wantErr: false, }, { - name: "Case 4: Wrong command requested", - requestedCommands: []string{"garbage1"}, - commandActions: []common.DevfileCommandAction{ + 
name: "Case 4: Invalid devfile with empty component", + execCommands: []versionsCommon.Exec{ { - Command: &commands[0], - Component: &components[0], - Workdir: &workDir[0], - Type: &validCommandType, + CommandLine: commands[0], + Component: emptyString, + WorkingDir: workDir[0], + Group: &versionsCommon.Group{Kind: initGroup}, }, }, - isCommandRequired: []bool{true}, - wantErr: true, + requestedType: []common.DevfileCommandGroupType{initGroup}, + wantErr: true, }, { - name: "Case 5: Invalid devfile with wrong devinit command type", - requestedCommands: []string{"devinit"}, - commandActions: []versionsCommon.DevfileCommandAction{ + name: "Case 5: Invalid devfile with empty devinit command", + execCommands: []versionsCommon.Exec{ { - Command: &commands[0], - Component: &components[0], - Workdir: &workDir[0], - Type: &invalidCommandType, + CommandLine: emptyString, + Component: components[0], + WorkingDir: workDir[0], + Group: &versionsCommon.Group{Kind: initGroup}, }, }, - isCommandRequired: []bool{true}, - wantErr: true, + requestedType: []common.DevfileCommandGroupType{initGroup}, + wantErr: true, }, { - name: "Case 6: Invalid devfile with empty devinit component", - requestedCommands: []string{"devinit"}, - commandActions: []versionsCommon.DevfileCommandAction{ + name: "Case 6: Valid devfile with empty workdir", + execCommands: []common.Exec{ { - Command: &commands[0], - Component: &emptyString, - Workdir: &workDir[0], - Type: &validCommandType, + CommandLine: commands[0], + Component: components[0], + Group: &versionsCommon.Group{Kind: runGroup}, }, }, - isCommandRequired: []bool{false}, - wantErr: true, + requestedType: []common.DevfileCommandGroupType{runGroup}, + wantErr: false, }, { - name: "Case 7: Invalid devfile with empty devinit command", - requestedCommands: []string{"devinit"}, - commandActions: []versionsCommon.DevfileCommandAction{ + name: "Case 7: Invalid command referencing an absent component", + execCommands: []common.Exec{ { - Command: 
&emptyString, - Component: &components[0], - Workdir: &workDir[0], - Type: &validCommandType, + CommandLine: commands[0], + Component: invalidComponent, + Group: &versionsCommon.Group{Kind: runGroup}, }, }, - isCommandRequired: []bool{false}, - wantErr: true, + requestedType: []common.DevfileCommandGroupType{runGroup}, + wantErr: true, }, { - name: "Case 8: Invalid devfile with wrong devbuild command type", - requestedCommands: []string{"devbuild"}, - commandActions: []common.DevfileCommandAction{ + name: "Case 8: Mismatched command type", + execCommands: []common.Exec{ { - Command: &commands[0], - Component: &components[0], - Workdir: &workDir[0], - Type: &invalidCommandType, + Id: "build command", + CommandLine: commands[0], + Component: components[0], + Group: &versionsCommon.Group{Kind: runGroup}, }, }, - isCommandRequired: []bool{true}, - wantErr: true, + reqCommandName: "build command", + requestedType: []common.DevfileCommandGroupType{buildGroup}, + wantErr: true, }, { - name: "Case 9: Invalid devfile with empty devbuild component", - requestedCommands: []string{"devbuild"}, - commandActions: []common.DevfileCommandAction{ + name: "Case 9: Default command is returned", + execCommands: []common.Exec{ { - Command: &commands[0], - Component: &emptyString, - Workdir: &workDir[0], - Type: &validCommandType, + Id: "defaultRunCommand", + CommandLine: commands[0], + Component: components[0], + Group: &versionsCommon.Group{Kind: runGroup, IsDefault: true}, }, - }, - isCommandRequired: []bool{false}, - wantErr: true, - }, - { - name: "Case 10: Invalid devfile with empty devbuild command", - requestedCommands: []string{"devbuild"}, - commandActions: []common.DevfileCommandAction{ { - Command: &emptyString, - Component: &components[0], - Workdir: &workDir[0], - Type: &validCommandType, + Id: "runCommand", + CommandLine: commands[0], + Component: components[0], + Group: &versionsCommon.Group{Kind: runGroup}, }, }, - isCommandRequired: []bool{false}, - wantErr: true, - }, 
- { - name: "Case 11: Valid devfile with empty workdir", - requestedCommands: []string{"devrun"}, - commandActions: []common.DevfileCommandAction{ - { - Command: &commands[0], - Component: &components[0], - Type: &validCommandType, - }, - }, - isCommandRequired: []bool{true}, - wantErr: false, - }, - { - name: "Case 12: Invalid command referencing an absent component", - requestedCommands: []string{"devrun"}, - commandActions: []common.DevfileCommandAction{ - { - Command: &commands[0], - Component: &invalidComponent, - Type: &validCommandType, - }, - }, - isCommandRequired: []bool{true}, - wantErr: true, + retCommandName: "defaultRunCommand", + requestedType: []common.DevfileCommandGroupType{runGroup}, + wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + components := []common.DevfileComponent{testingutil.GetFakeComponent(tt.execCommands[0].Component)} + if tt.execCommands[0].Component == invalidComponent { + components = []common.DevfileComponent{testingutil.GetFakeComponent("randomComponent")} + } devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - CommandActions: tt.commandActions, - ComponentType: common.DevfileComponentTypeDockerimage, + ExecCommands: tt.execCommands, + Components: components, }, } - for i, commandName := range tt.requestedCommands { - command, err := getCommand(devObj.Data, commandName, tt.isCommandRequired[i]) + for _, gtype := range tt.requestedType { + cmd, err := getCommand(devObj.Data, tt.reqCommandName, gtype) if !tt.wantErr == (err != nil) { - t.Errorf("TestGetCommand unexpected error for command: %v wantErr: %v err: %v", commandName, tt.wantErr, err) + t.Errorf("TestGetCommand unexpected error for command: %v wantErr: %v err: %v", gtype, tt.wantErr, err) return } else if tt.wantErr { return } - if command.Name != commandName { - t.Errorf("TestGetCommand error: command names do not match expected: %v actual: %v", commandName, command.Name) - } - - if len(command.Actions) != 1 { - 
t.Errorf("TestGetCommand error: command %v do not have the correct number of actions actual: %v", commandName, len(command.Actions)) + if cmd.Exec != nil { + if cmd.Exec.Id != tt.retCommandName { + t.Errorf("TestGetCommand error: command names do not match expected: %v actual: %v", tt.retCommandName, cmd.Exec.Id) + } } } }) @@ -225,234 +177,222 @@ func TestGetCommand(t *testing.T) { } -func TestGetSupportedCommandActions(t *testing.T) { +func TestValidateAction(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/" - validCommandType := common.DevfileCommandTypeExec - invalidCommandType := common.DevfileCommandType("garbage") + emptyString := "" tests := []struct { name string - command common.DevfileCommand + exec common.Exec wantErr bool }{ { - name: "Case: Valid Command Action", - command: common.DevfileCommand{ - Name: "testCommand", - Actions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, + name: "Case: Valid Exec Command", + exec: common.Exec{ + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, }, wantErr: false, }, { - name: "Case: Invalid Command Action with empty command", - command: common.DevfileCommand{ - Name: "testCommand", - Actions: []common.DevfileCommandAction{ - { - Command: &emptyString, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, + name: "Case: Invalid Exec Command with empty command", + exec: common.Exec{ + CommandLine: emptyString, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, }, wantErr: true, }, { - name: "Case: Invalid Command Action with missing component", - command: common.DevfileCommand{ - Name: "testCommand", - Actions: []common.DevfileCommandAction{ - { - Command: &command, - Workdir: &workDir, - Type: &validCommandType, - }, - }, + name: "Case: Invalid Exec Command with 
missing component", + exec: common.Exec{ + CommandLine: command, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, }, wantErr: true, }, { - name: "Case: Invalid Command Action with wrong type", - command: common.DevfileCommand{ - Name: "testCommand", - Actions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &invalidCommandType, - }, - }, + name: "Case: valid Exec Command with Group nil", + exec: common.Exec{ + CommandLine: command, + Component: component, + WorkingDir: workDir, }, - wantErr: true, + wantErr: false, }, } for _, tt := range tests { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - CommandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Type: &validCommandType, - }, - }, - ComponentType: common.DevfileComponentTypeDockerimage, + ExecCommands: []common.Exec{tt.exec}, + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, }, } t.Run(tt.name, func(t *testing.T) { - supportedCommandActions, _ := getSupportedCommandActions(devObj.Data, tt.command) - if !tt.wantErr && len(supportedCommandActions) != len(tt.command.Actions) { - t.Errorf("TestGetSupportedCommandActions error: incorrect number of command actions expected: %v actual: %v", len(tt.command.Actions), len(supportedCommandActions)) - } else if tt.wantErr && len(supportedCommandActions) != 0 { - t.Errorf("TestGetSupportedCommandActions error: incorrect number of command actions expected: %v actual: %v", 0, len(supportedCommandActions)) + cmd := common.DevfileCommand{Exec: &tt.exec} + err := validateCommand(devObj.Data, cmd) + if !tt.wantErr == (err != nil) { + t.Errorf("TestValidateAction unexpected error: %v", err) + return } }) } } -func TestValidateAction(t *testing.T) { +func TestGetInitCommand(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/" - validCommandType := common.DevfileCommandTypeExec - 
invalidCommandType := common.DevfileCommandType("garbage") emptyString := "" + var emptyCommand common.DevfileCommand + tests := []struct { - name string - action common.DevfileCommandAction - wantErr bool + name string + commandName string + execCommands []common.Exec + wantErr bool }{ { - name: "Case: Valid Command Action", - action: common.DevfileCommandAction{ - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + name: "Case: Default Init Command", + commandName: emptyString, + execCommands: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: initGroup, IsDefault: true}, + }, }, wantErr: false, }, { - name: "Case: Invalid Command Action with empty command", - action: common.DevfileCommandAction{ - Command: &emptyString, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - wantErr: true, - }, - { - name: "Case: Invalid Command Action with missing component", - action: common.DevfileCommandAction{ - Command: &command, - Workdir: &workDir, - Type: &validCommandType, + name: "Case: Init Command passed through odo flag", + commandName: "flagcommand", + execCommands: []versionsCommon.Exec{ + { + Id: "flagcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: initGroup}, + }, + { + Id: "init command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: initGroup}, + }, }, - wantErr: true, + wantErr: false, }, { - name: "Case: Invalid Command Action with wrong type", - action: common.DevfileCommandAction{ - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &invalidCommandType, + name: "Case: Missing Init Command", + commandName: "customcommand123", + execCommands: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: 
&versionsCommon.Group{Kind: initGroup}, + }, }, wantErr: true, }, } + for _, tt := range tests { - devObj := devfileParser.DevfileObj{ - Data: testingutil.TestDevfileData{ - CommandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Type: &validCommandType, - }, - }, - ComponentType: common.DevfileComponentTypeDockerimage, - }, - } t.Run(tt.name, func(t *testing.T) { - err := validateAction(devObj.Data, tt.action) + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + ExecCommands: tt.execCommands, + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, + }, + } + + command, err := GetInitCommand(devObj.Data, tt.commandName) + if !tt.wantErr == (err != nil) { - t.Errorf("TestValidateAction unexpected error: %v", err) - return + t.Errorf("TestGetInitCommand: unexpected error for command \"%v\" expected: %v actual: %v", tt.commandName, tt.wantErr, err) + } else if !tt.wantErr && reflect.DeepEqual(emptyCommand, command) { + t.Errorf("TestGetInitCommand: unexpected empty command returned for command: %v", tt.commandName) } + }) } } -func TestGetInitCommand(t *testing.T) { +func TestGetBuildCommand(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/" - validCommandType := common.DevfileCommandTypeExec emptyString := "" var emptyCommand common.DevfileCommand tests := []struct { - name string - commandName string - commandActions []common.DevfileCommandAction - wantErr bool + name string + commandName string + execCommands []common.Exec + wantErr bool }{ { - name: "Case: Default Init Command", + name: "Case 1: Default Build Command", commandName: emptyString, - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: buildGroup, 
IsDefault: true}, }, }, wantErr: false, }, { - name: "Case: Custom Init Command", - commandName: "customcommand", - commandActions: []versionsCommon.DevfileCommandAction{ + name: "Case 2: Build Command passed through the odo flag", + commandName: "flagcommand", + execCommands: []common.Exec{ + { + Id: "flagcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: buildGroup}, + }, { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + Id: "build command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: buildGroup}, }, }, wantErr: false, }, { - name: "Case: Missing Init Command", + name: "Case 3: Missing Build Command", commandName: "customcommand123", - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + Id: "build command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: buildGroup}, }, }, wantErr: true, @@ -463,17 +403,17 @@ func TestGetInitCommand(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - CommandActions: tt.commandActions, - ComponentType: versionsCommon.DevfileComponentTypeDockerimage, + ExecCommands: tt.execCommands, + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, }, } - command, err := GetInitCommand(devObj.Data, tt.commandName) + command, err := GetBuildCommand(devObj.Data, tt.commandName) if !tt.wantErr == (err != nil) { - t.Errorf("TestGetInitCommand: unexpected error for command \"%v\" expected: %v actual: %v", tt.commandName, tt.wantErr, err) + t.Errorf("TestGetBuildCommand: unexpected error for command \"%v\" expected: %v actual: %v", tt.commandName, tt.wantErr, err) } else if !tt.wantErr 
&& reflect.DeepEqual(emptyCommand, command) { - t.Errorf("TestGetInitCommand: unexpected empty command returned for command: %v", tt.commandName) + t.Errorf("TestGetBuildCommand: unexpected empty command returned for command: %v", tt.commandName) } }) @@ -481,57 +421,66 @@ func TestGetInitCommand(t *testing.T) { } -func TestGetBuildCommand(t *testing.T) { +func TestGetDebugCommand(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/" - validCommandType := common.DevfileCommandTypeExec emptyString := "" var emptyCommand common.DevfileCommand tests := []struct { - name string - commandName string - commandActions []common.DevfileCommandAction - wantErr bool + name string + commandName string + execCommands []common.Exec + wantErr bool }{ { - name: "Case: Default Build Command", + name: "Case: Default Debug Command", commandName: emptyString, - commandActions: []common.DevfileCommandAction{ + execCommands: []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{ + IsDefault: true, + Kind: versionsCommon.DebugCommandGroupType, + }, }, }, wantErr: false, }, { - name: "Case: Custom Build Command", - commandName: "customcommand", - commandActions: []common.DevfileCommandAction{ + name: "Case: Custom Debug Command", + commandName: "customdebugcommand", + execCommands: []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + Id: "customdebugcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{ + IsDefault: false, + Kind: versionsCommon.DebugCommandGroupType, + }, }, }, wantErr: false, }, { - name: "Case: Missing Build Command", + name: "Case: Missing Debug Command", commandName: "customcommand123", - commandActions: []common.DevfileCommandAction{ + execCommands: []common.Exec{ { - 
Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{ + IsDefault: true, + Kind: versionsCommon.BuildCommandGroupType, + }, }, }, wantErr: true, @@ -542,22 +491,24 @@ func TestGetBuildCommand(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - CommandActions: tt.commandActions, - ComponentType: common.DevfileComponentTypeDockerimage, + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, + ExecCommands: tt.execCommands, }, } - command, err := GetBuildCommand(devObj.Data, tt.commandName) + command, err := GetDebugCommand(devObj.Data, tt.commandName) - if !tt.wantErr == (err != nil) { - t.Errorf("TestGetBuildCommand: unexpected error for command \"%v\" expected: %v actual: %v", tt.commandName, tt.wantErr, err) - } else if !tt.wantErr && reflect.DeepEqual(emptyCommand, command) { - t.Errorf("TestGetBuildCommand: unexpected empty command returned for command: %v", tt.commandName) + if tt.wantErr && err == nil { + t.Errorf("Error was expected but got no error") + } else if !tt.wantErr { + if err != nil { + t.Errorf("TestGetDebugCommand: unexpected error for command \"%v\" expected: %v actual: %v", tt.commandName, tt.wantErr, err) + } else if reflect.DeepEqual(emptyCommand, command) { + t.Errorf("TestGetDebugCommand: unexpected empty command returned for command: %v", tt.commandName) + } } - }) } - } func TestGetRunCommand(t *testing.T) { @@ -565,52 +516,59 @@ func TestGetRunCommand(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/" - validCommandType := common.DevfileCommandTypeExec emptyString := "" var emptyCommand common.DevfileCommand tests := []struct { - name string - commandName string - commandActions []common.DevfileCommandAction - wantErr bool + name string + commandName string + execCommands 
[]common.Exec + wantErr bool }{ { - name: "Case: Default Run Command", + name: "Case 1: Default Run Command", commandName: emptyString, - commandActions: []common.DevfileCommandAction{ + execCommands: []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup, IsDefault: true}, }, }, wantErr: false, }, { - name: "Case: Custom Run Command", - commandName: "customcommand", - commandActions: []common.DevfileCommandAction{ + name: "Case 2: Run Command passed through odo flag", + commandName: "flagcommand", + execCommands: []common.Exec{ + { + Id: "flagcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, + }, { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + Id: "run command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, }, }, wantErr: false, }, { - name: "Case: Missing Run Command", - commandName: "customcommand123", - commandActions: []common.DevfileCommandAction{ + name: "Case 3: Missing Run Command", + commandName: "", + execCommands: []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: initGroup}, }, }, wantErr: true, @@ -621,8 +579,8 @@ func TestGetRunCommand(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - CommandActions: tt.commandActions, - ComponentType: common.DevfileComponentTypeDockerimage, + ExecCommands: tt.execCommands, + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, }, } @@ -638,21 +596,138 @@ func TestGetRunCommand(t 
*testing.T) { } +func TestValidateAndGetDebugDevfileCommands(t *testing.T) { + + command := "ls -la" + component := "alias1" + workDir := "/" + emptyString := "" + + execCommands := []common.Exec{ + { + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &common.Group{ + IsDefault: true, + Kind: common.DebugCommandGroupType, + }, + }, + { + Id: "customdebugcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &common.Group{ + IsDefault: false, + Kind: common.DebugCommandGroupType, + }, + }, + } + + tests := []struct { + name string + debugCommand string + componentType common.DevfileComponentType + wantErr bool + }{ + { + name: "Case: Default Devfile Commands", + debugCommand: emptyString, + componentType: common.ContainerComponentType, + wantErr: false, + }, + { + name: "Case: provided debug Command", + debugCommand: "customdebugcommand", + componentType: versionsCommon.ContainerComponentType, + wantErr: false, + }, + { + name: "Case: invalid debug Command", + debugCommand: "invaliddebugcommand", + componentType: versionsCommon.ContainerComponentType, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, + ExecCommands: execCommands, + }, + } + + debugCommand, err := ValidateAndGetDebugDevfileCommands(devObj.Data, tt.debugCommand) + + if tt.wantErr { + if err == nil { + t.Errorf("Error was expected but got no error") + } else { + return + } + } else { + if err != nil { + t.Errorf("TestValidateAndGetDebugDevfileCommands: unexpected error %v", err) + } + } + + if !reflect.DeepEqual(nil, debugCommand) && debugCommand.Exec.Id != tt.debugCommand { + t.Errorf("TestValidateAndGetDebugDevfileCommands name of debug command is wrong want: %v got: %v", tt.debugCommand, debugCommand.Exec.Id) + } + }) + } +} + func 
TestValidateAndGetPushDevfileCommands(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/" - validCommandType := common.DevfileCommandTypeExec emptyString := "" - actions := []common.DevfileCommandAction{ + execCommands := []common.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + Id: "run command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, }, + + { + Id: "build command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: buildGroup}, + }, + + { + Id: "init command", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: initGroup}, + }, + { + Id: "customcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, + }, + } + + wrongCompTypeCmd := common.Exec{ + + Id: "run command", + CommandLine: command, + Component: "", + WorkingDir: workDir, + Group: &versionsCommon.Group{Kind: runGroup}, } tests := []struct { @@ -660,95 +735,125 @@ func TestValidateAndGetPushDevfileCommands(t *testing.T) { initCommand string buildCommand string runCommand string + execCommands []common.Exec numberOfCommands int - componentType common.DevfileComponentType missingInitCommand bool missingBuildCommand bool wantErr bool }{ { - name: "Case: Default Devfile Commands", + name: "Case 1: Default Devfile Commands", initCommand: emptyString, buildCommand: emptyString, runCommand: emptyString, + execCommands: execCommands, numberOfCommands: 3, - componentType: common.DevfileComponentTypeDockerimage, wantErr: false, }, { - name: "Case: Default Init and Build Command, and Provided Run Command", + name: "Case 2: Default Init and Build Command, and Provided Run Command", initCommand: emptyString, buildCommand: emptyString, runCommand: "customcommand", + execCommands: 
execCommands, numberOfCommands: 3, - componentType: common.DevfileComponentTypeDockerimage, wantErr: false, }, { - name: "Case: No Dockerimage Component", + name: "Case 3: Empty Component", initCommand: emptyString, buildCommand: "customcommand", runCommand: "customcommand", + execCommands: append(execCommands, wrongCompTypeCmd), numberOfCommands: 0, - componentType: "", wantErr: true, }, { - name: "Case: Provided Wrong Build Command and Provided Run Command", + name: "Case 4: Provided Wrong Build Command and Provided Run Command", initCommand: emptyString, buildCommand: "customcommand123", runCommand: "customcommand", + execCommands: execCommands, numberOfCommands: 1, - componentType: common.DevfileComponentTypeDockerimage, wantErr: true, }, { - name: "Case: Provided Wrong Init Command and Provided Build and Run Command", + name: "Case 5: Provided Wrong Init Command and Provided Build and Run Command", initCommand: "customcommand123", buildCommand: emptyString, runCommand: "customcommand", + execCommands: execCommands, numberOfCommands: 1, - componentType: versionsCommon.DevfileComponentTypeDockerimage, wantErr: true, }, { - name: "Case: Missing Init and Build Command, and Provided Run Command", - initCommand: emptyString, - buildCommand: emptyString, - runCommand: "customcommand", - numberOfCommands: 1, - componentType: common.DevfileComponentTypeDockerimage, - missingInitCommand: true, - missingBuildCommand: true, - wantErr: false, + name: "Case 6: Missing Init and Build Command, and Provided Run Command", + initCommand: emptyString, + buildCommand: emptyString, + runCommand: "customcommand", + execCommands: []common.Exec{ + { + Id: "customcommand", + Group: &common.Group{Kind: runGroup}, + Component: component, + CommandLine: command, + }, + }, + numberOfCommands: 1, + wantErr: false, }, { - name: "Case: Missing Init Command with provided Build and Run Command", - initCommand: emptyString, - buildCommand: "customcommand", - runCommand: "customcommand", + name: 
"Case 7: Missing Init Command with provided Build and Run Command", + initCommand: emptyString, + buildCommand: "build command", + runCommand: "run command", + execCommands: []common.Exec{ + { + Id: "build command", + Group: &common.Group{Kind: buildGroup}, + Component: component, + CommandLine: command, + }, + { + Id: "run command", + Group: &common.Group{Kind: runGroup}, + Component: component, + CommandLine: command, + }, + }, numberOfCommands: 2, - componentType: versionsCommon.DevfileComponentTypeDockerimage, missingInitCommand: true, wantErr: false, }, { - name: "Case: Missing Build Command with provided Init and Run Command", - initCommand: "customcommand", - buildCommand: emptyString, - runCommand: "customcommand", - numberOfCommands: 2, - componentType: versionsCommon.DevfileComponentTypeDockerimage, - missingBuildCommand: true, - wantErr: false, + name: "Case 8: Missing Build Command with provided Init and Run Command", + initCommand: "init command", + buildCommand: emptyString, + runCommand: "run command", + execCommands: []common.Exec{ + { + Id: "init command", + Group: &common.Group{Kind: initGroup}, + Component: component, + CommandLine: command, + }, + { + Id: "run command", + Group: &common.Group{Kind: runGroup}, + Component: component, + CommandLine: command, + }, + }, + numberOfCommands: 2, + wantErr: false, }, { - name: "Case: Optional Init Command with provided Build and Run Command", - initCommand: "customcommand", - buildCommand: "customcommand", - runCommand: "customcommand", + name: "Case 9: Optional Init Command with provided Build and Run Command", + initCommand: "init command", + buildCommand: "build command", + runCommand: "run command", + execCommands: execCommands, numberOfCommands: 3, - componentType: versionsCommon.DevfileComponentTypeDockerimage, wantErr: false, }, } @@ -757,10 +862,8 @@ func TestValidateAndGetPushDevfileCommands(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: 
testingutil.TestDevfileData{ - CommandActions: actions, - ComponentType: tt.componentType, - MissingInitCommand: tt.missingInitCommand, - MissingBuildCommand: tt.missingBuildCommand, + ExecCommands: tt.execCommands, + Components: []common.DevfileComponent{testingutil.GetFakeComponent(component)}, }, } @@ -778,3 +881,19 @@ func TestValidateAndGetPushDevfileCommands(t *testing.T) { } } + +func getExecCommand(id string, group common.DevfileCommandGroupType) versionsCommon.Exec { + + commands := [...]string{"ls -la", "pwd"} + components := [...]string{"alias1", "alias2"} + workDir := [...]string{"/", "/root"} + + return versionsCommon.Exec{ + Id: id, + CommandLine: commands[0], + Component: components[0], + WorkingDir: workDir[0], + Group: &common.Group{Kind: group}, + } + +} diff --git a/pkg/devfile/adapters/common/types.go b/pkg/devfile/adapters/common/types.go index 3a4cb3882fb..5e762a957a0 100644 --- a/pkg/devfile/adapters/common/types.go +++ b/pkg/devfile/adapters/common/types.go @@ -2,6 +2,7 @@ package common import ( devfileParser "github.com/openshift/odo/pkg/devfile/parser" + "github.com/openshift/odo/pkg/devfile/parser/data/common" "github.com/openshift/odo/pkg/envinfo" ) @@ -14,9 +15,9 @@ type AdapterContext struct { // DevfileVolume is a struct for Devfile volume that is common to all the adapters type DevfileVolume struct { - Name *string - ContainerPath *string - Size *string + Name string + ContainerPath string + Size string } // Storage is a struct that is common to all the adapters @@ -36,7 +37,10 @@ type PushParameters struct { DevfileInitCmd string // DevfileInitCmd takes the init command through the command line and overwrites devfile init command DevfileBuildCmd string // DevfileBuildCmd takes the build command through the command line and overwrites devfile build command DevfileRunCmd string // DevfileRunCmd takes the run command through the command line and overwrites devfile run command + DevfileDebugCmd string // DevfileDebugCmd takes the debug 
command through the command line and overwrites the devfile debug command EnvSpecificInfo envinfo.EnvSpecificInfo // EnvSpecificInfo contains infomation of env.yaml file + Debug bool // Runs the component in debug mode + DebugPort int // Port used for remote debugging } // SyncParameters is a struct containing the parameters to be used when syncing a devfile component @@ -52,3 +56,11 @@ type ComponentInfo struct { PodName string ContainerName string } + +// PushCommandsMap stores the commands to be executed as per their types. +type PushCommandsMap map[common.DevfileCommandGroupType]common.DevfileCommand + +// NewPushCommandMap returns the instance of PushCommandsMap +func NewPushCommandMap() PushCommandsMap { + return make(map[common.DevfileCommandGroupType]common.DevfileCommand) +} diff --git a/pkg/devfile/adapters/common/utils.go b/pkg/devfile/adapters/common/utils.go index 7a754804aa4..7083fb2f9ce 100644 --- a/pkg/devfile/adapters/common/utils.go +++ b/pkg/devfile/adapters/common/utils.go @@ -24,6 +24,9 @@ const ( // DefaultDevfileRunCommand is a predefined devfile command for run DefaultDevfileRunCommand PredefinedDevfileCommands = "devrun" + // DefaultDevfileDebugCommand is a predefined devfile command for debug + DefaultDevfileDebugCommand PredefinedDevfileCommands = "debugrun" + // SupervisordInitContainerName The init container name for supervisord SupervisordInitContainerName = "copy-supervisord" @@ -67,6 +70,15 @@ const ( // EnvOdoCommandRun is the env defined in the runtime component container which holds the run command to be executed EnvOdoCommandRun = "ODO_COMMAND_RUN" + // EnvOdoCommandDebugWorkingDir is the env defined in the runtime component container which holds the work dir for the debug command + EnvOdoCommandDebugWorkingDir = "ODO_COMMAND_DEBUG_WORKING_DIR" + + // EnvOdoCommandDebug is the env defined in the runtime component container which holds the debug command to be executed + EnvOdoCommandDebug = "ODO_COMMAND_DEBUG" + + // EnvDebugPort 
is the env defined in the runtime component container which holds the debug port for remote debugging + EnvDebugPort = "DEBUG_PORT" + // ShellExecutable is the shell executable ShellExecutable = "/bin/sh" @@ -81,8 +93,12 @@ type CommandNames struct { } func isComponentSupported(component common.DevfileComponent) bool { - // Currently odo only uses devfile components of type dockerimage, since most of the Che registry devfiles use it - return component.Type == common.DevfileComponentTypeDockerimage + // Currently odo only uses devfile components of type container, since most of the Che registry devfiles use it + if component.Container != nil { + klog.V(4).Infof("Found component \"%v\" with name \"%v\"\n", common.ContainerComponentType, component.Container.Name) + return true + } + return false } // GetBootstrapperImage returns the odo-init bootstrapper image @@ -99,7 +115,6 @@ func GetSupportedComponents(data data.DevfileData) []common.DevfileComponent { // Only components with aliases are considered because without an alias commands cannot reference them for _, comp := range data.GetAliasedComponents() { if isComponentSupported(comp) { - klog.V(3).Infof("Found component \"%v\" with alias \"%v\"\n", comp.Type, *comp.Alias) components = append(components, comp) } } @@ -112,14 +127,14 @@ func GetVolumes(devfileObj devfileParser.DevfileObj) map[string][]DevfileVolume componentAliasToVolumes := make(map[string][]DevfileVolume) size := volumeSize for _, comp := range GetSupportedComponents(devfileObj.Data) { - if comp.Volumes != nil { - for _, volume := range comp.Volumes { + if len(comp.Container.VolumeMounts) != 0 { + for _, volume := range comp.Container.VolumeMounts { vol := DevfileVolume{ Name: volume.Name, - ContainerPath: volume.ContainerPath, - Size: &size, + ContainerPath: volume.Path, + Size: size, } - componentAliasToVolumes[*comp.Alias] = append(componentAliasToVolumes[*comp.Alias], vol) + componentAliasToVolumes[comp.Container.Name] = 
append(componentAliasToVolumes[comp.Container.Name], vol) } } } @@ -127,9 +142,9 @@ func GetVolumes(devfileObj devfileParser.DevfileObj) map[string][]DevfileVolume } // IsEnvPresent checks if the env variable is present in an array of env variables -func IsEnvPresent(envVars []common.DockerimageEnv, envVarName string) bool { +func IsEnvPresent(envVars []common.Env, envVarName string) bool { for _, envVar := range envVars { - if *envVar.Name == envVarName { + if envVar.Name == envVarName { return true } } @@ -138,9 +153,9 @@ func IsEnvPresent(envVars []common.DockerimageEnv, envVarName string) bool { } // IsPortPresent checks if the port is present in the endpoints array -func IsPortPresent(endpoints []common.DockerimageEndpoint, port int) bool { +func IsPortPresent(endpoints []common.Endpoint, port int) bool { for _, endpoint := range endpoints { - if *endpoint.Port == int32(port) { + if endpoint.TargetPort == int32(port) { return true } } @@ -152,7 +167,7 @@ func IsPortPresent(endpoints []common.DockerimageEndpoint, port int) bool { func IsRestartRequired(command common.DevfileCommand) bool { var restart = true var err error - rs, ok := command.Attributes["restart"] + rs, ok := command.Exec.Attributes["restart"] if ok { restart, err = strconv.ParseBool(rs) // Ignoring error here as restart is true for all error and default cases. 
diff --git a/pkg/devfile/adapters/common/utils_test.go b/pkg/devfile/adapters/common/utils_test.go index 3b8bd6ef632..3e8b9e59ab6 100644 --- a/pkg/devfile/adapters/common/utils_test.go +++ b/pkg/devfile/adapters/common/utils_test.go @@ -8,75 +8,56 @@ import ( "github.com/openshift/odo/pkg/devfile/parser/data/common" versionsCommon "github.com/openshift/odo/pkg/devfile/parser/data/common" "github.com/openshift/odo/pkg/testingutil" - "github.com/openshift/odo/pkg/util" ) func TestGetSupportedComponents(t *testing.T) { tests := []struct { name string - componentType versionsCommon.DevfileComponentType + component []versionsCommon.DevfileComponent alias []string expectedMatchesCount int }{ { - name: "Case: Invalid devfile", - componentType: "", + name: "Case 1: Invalid devfile", + component: []versionsCommon.DevfileComponent{}, expectedMatchesCount: 0, }, { - name: "Case: Valid devfile with wrong component type (CheEditor)", - componentType: versionsCommon.DevfileComponentTypeCheEditor, - alias: []string{"alias1", "alias2"}, + name: "Case 2: Valid devfile with wrong component type (Openshift)", + component: []versionsCommon.DevfileComponent{{Openshift: &versionsCommon.Openshift{}}}, expectedMatchesCount: 0, }, { - name: "Case: Valid devfile with wrong component type (ChePlugin)", - componentType: versionsCommon.DevfileComponentTypeChePlugin, - alias: []string{"alias1", "alias2"}, - expectedMatchesCount: 0, - }, - { - name: "Case: Valid devfile with wrong component type (Kubernetes)", - componentType: versionsCommon.DevfileComponentTypeKubernetes, - alias: []string{"alias1", "alias2"}, + name: "Case 3: Valid devfile with wrong component type (Kubernetes)", + component: []versionsCommon.DevfileComponent{{Kubernetes: &versionsCommon.Kubernetes{}}}, expectedMatchesCount: 0, }, + { - name: "Case: Valid devfile with wrong component type (Openshift)", - componentType: versionsCommon.DevfileComponentTypeOpenshift, - alias: []string{"alias1", "alias2"}, - expectedMatchesCount: 
0, + name: "Case 4 : Valid devfile with correct component type (Container)", + component: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("comp1"), testingutil.GetFakeComponent("comp2")}, + expectedMatchesCount: 2, }, + { - name: "Case: Valid devfile with correct component type (Dockerimage)", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - alias: []string{"alias1", "alias2"}, - expectedMatchesCount: 2, + name: "Case 5: Valid devfile with correct component type (Container) without name", + component: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("comp1"), testingutil.GetFakeComponent("")}, + expectedMatchesCount: 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: tt.component, }, } devfileComponents := GetSupportedComponents(devObj.Data) - componentsMatched := 0 - for _, component := range devfileComponents { - if component.Type != versionsCommon.DevfileComponentTypeDockerimage { - t.Errorf("TestGetSupportedComponents error: wrong component type expected %v, actual %v", versionsCommon.DevfileComponentTypeDockerimage, component.Type) - } - if util.In(tt.alias, *component.Alias) { - componentsMatched++ - } - } - - if componentsMatched != tt.expectedMatchesCount { - t.Errorf("TestGetSupportedComponents error: wrong number of components matched: expected %v, actual %v", tt.expectedMatchesCount, componentsMatched) + if len(devfileComponents) != tt.expectedMatchesCount { + t.Errorf("TestGetSupportedComponents error: wrong number of components matched: expected %v, actual %v", tt.expectedMatchesCount, len(devfileComponents)) } }) } @@ -88,10 +69,10 @@ func TestIsEnvPresent(t *testing.T) { envName := "myenv" envValue := "myenvvalue" - envVars := []common.DockerimageEnv{ + envVars := []common.Env{ { - Name: &envName, - Value: &envValue, + Name: envName, + Value: envValue, }, } @@ 
-127,10 +108,10 @@ func TestIsPortPresent(t *testing.T) { endpointName := "8080/tcp" var endpointPort int32 = 8080 - endpoints := []common.DockerimageEndpoint{ + endpoints := []common.Endpoint{ { - Name: &endpointName, - Port: &endpointPort, + Name: endpointName, + TargetPort: endpointPort, }, } @@ -204,16 +185,14 @@ func TestIsComponentSupported(t *testing.T) { wantIsSupported bool }{ { - name: "Case 1: Supported component", - component: common.DevfileComponent{ - Type: versionsCommon.DevfileComponentTypeDockerimage, - }, + name: "Case 1: Supported component", + component: testingutil.GetFakeComponent("comp1"), wantIsSupported: true, }, { name: "Case 2: Unsupported component", component: common.DevfileComponent{ - Type: versionsCommon.DevfileComponentTypeCheEditor, + Openshift: &versionsCommon.Openshift{}, }, wantIsSupported: false, }, diff --git a/pkg/devfile/adapters/docker/component/adapter.go b/pkg/devfile/adapters/docker/component/adapter.go index 5acf6c468a6..fe306fbff9a 100644 --- a/pkg/devfile/adapters/docker/component/adapter.go +++ b/pkg/devfile/adapters/docker/component/adapter.go @@ -14,14 +14,25 @@ import ( "github.com/openshift/odo/pkg/devfile/adapters/docker/utils" "github.com/openshift/odo/pkg/lclient" "github.com/openshift/odo/pkg/log" + "github.com/openshift/odo/pkg/machineoutput" "github.com/openshift/odo/pkg/sync" ) // New instantiantes a component adapter func New(adapterContext common.AdapterContext, client lclient.Client) Adapter { + + var loggingClient machineoutput.MachineEventLoggingClient + + if log.IsJSON() { + loggingClient = machineoutput.NewConsoleMachineEventLoggingClient() + } else { + loggingClient = machineoutput.NewNoOpMachineEventLoggingClient() + } + return Adapter{ - Client: client, - AdapterContext: adapterContext, + Client: client, + AdapterContext: adapterContext, + machineEventLogger: loggingClient, } } @@ -37,18 +48,25 @@ type Adapter struct { devfileBuildCmd string devfileRunCmd string supervisordVolumeName string + 
projectVolumeName string + machineEventLogger machineoutput.MachineEventLoggingClient } // Push updates the component if a matching component exists or creates one if it doesn't exist func (a Adapter) Push(parameters common.PushParameters) (err error) { - componentExists := utils.ComponentExists(a.Client, a.ComponentName) + componentExists, err := utils.ComponentExists(a.Client, a.Devfile.Data, a.ComponentName) + if err != nil { + return errors.Wrapf(err, "unable to determine if component %s exists", a.ComponentName) + } // Process the volumes defined in the devfile a.componentAliasToVolumes = common.GetVolumes(a.Devfile) a.uniqueStorage, a.volumeNameToDockerVolName, err = storage.ProcessVolumes(&a.Client, a.ComponentName, a.componentAliasToVolumes) if err != nil { - return errors.Wrapf(err, "Unable to process volumes for component %s", a.ComponentName) + return errors.Wrapf(err, "unable to process volumes for component %s", a.ComponentName) } + + a.devfileInitCmd = parameters.DevfileInitCmd a.devfileBuildCmd = parameters.DevfileBuildCmd a.devfileRunCmd = parameters.DevfileRunCmd @@ -62,19 +80,14 @@ func (a Adapter) Push(parameters common.PushParameters) (err error) { } s.End(true) - // Get the supervisord volume - supervisordLabels := utils.GetSupervisordVolumeLabels() - supervisordVolumes, err := a.Client.GetVolumesByLabel(supervisordLabels) + a.supervisordVolumeName, err = a.createAndInitSupervisordVolumeIfReqd(componentExists) if err != nil { - return errors.Wrapf(err, "unable to retrieve supervisord volume for component %s", a.ComponentName) + return errors.Wrapf(err, "unable to create supervisord volume for component %s", a.ComponentName) } - if len(supervisordVolumes) == 0 { - a.supervisordVolumeName, err = utils.CreateAndInitSupervisordVolume(a.Client) - if err != nil { - return errors.Wrapf(err, "unable to create supervisord volume for component %s", a.ComponentName) - } - } else { - a.supervisordVolumeName = supervisordVolumes[0].Name + + 
a.projectVolumeName, err = a.createProjectVolumeIfReqd() + if err != nil { + return errors.Wrapf(err, "unable to determine the project source volume for component %s", a.ComponentName) } if componentExists { @@ -129,7 +142,8 @@ func (a Adapter) Push(parameters common.PushParameters) (err error) { // DoesComponentExist returns true if a component with the specified name exists, false otherwise func (a Adapter) DoesComponentExist(cmpName string) bool { - return utils.ComponentExists(a.Client, cmpName) + componentExists, _ := utils.ComponentExists(a.Client, a.Devfile.Data, cmpName) + return componentExists } // getFirstContainerWithSourceVolume returns the first container that set mountSources: true @@ -197,10 +211,10 @@ func (a Adapter) Delete(labels map[string]string) error { if snVal := vol.Labels["storage-name"]; len(strings.TrimSpace(snVal)) > 0 { vols = append(vols, vol) - } else { - if typeVal := vol.Labels["type"]; typeVal == "projects" { - vols = append(vols, vol) - } + } else if typeVal := vol.Labels["type"]; typeVal == utils.ProjectsVolume { + vols = append(vols, vol) + } else if typeVal := vol.Labels["type"]; typeVal == utils.SupervisordVolume { + vols = append(vols, vol) } } } diff --git a/pkg/devfile/adapters/docker/component/adapter_test.go b/pkg/devfile/adapters/docker/component/adapter_test.go index 30a502e1ff5..71965bba367 100644 --- a/pkg/devfile/adapters/docker/component/adapter_test.go +++ b/pkg/devfile/adapters/docker/component/adapter_test.go @@ -26,7 +26,6 @@ func TestPush(t *testing.T) { command := "ls -la" component := "alias1" workDir := "/root" - validCommandType := common.DevfileCommandTypeExec // create a temp dir for the file indexer directory, err := ioutil.TempDir("", "") @@ -42,17 +41,27 @@ func TestPush(t *testing.T) { ForceBuild: false, } - commandActions := []versionsCommon.DevfileCommandAction{ + execCommands := []versionsCommon.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + 
CommandLine: command, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, + }, + WorkingDir: workDir, + }, + } + validComponents := []versionsCommon.DevfileComponent{ + { + Container: &versionsCommon.Container{ + Name: component, + }, }, } tests := []struct { name string + components []versionsCommon.DevfileComponent componentType versionsCommon.DevfileComponentType client *lclient.Client wantErr bool @@ -60,18 +69,21 @@ func TestPush(t *testing.T) { { name: "Case 1: Invalid devfile", componentType: "", + components: []versionsCommon.DevfileComponent{}, client: fakeClient, wantErr: true, }, { name: "Case 2: Valid devfile", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + components: validComponents, + componentType: versionsCommon.ContainerComponentType, client: fakeClient, wantErr: false, }, { name: "Case 3: Valid devfile, docker client error", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + components: validComponents, + componentType: versionsCommon.ContainerComponentType, client: fakeErrorClient, wantErr: true, }, @@ -80,8 +92,8 @@ func TestPush(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, - CommandActions: commandActions, + Components: tt.components, + ExecCommands: execCommands, }, } @@ -116,32 +128,39 @@ func TestDoesComponentExist(t *testing.T) { tests := []struct { name string client *lclient.Client - componentType versionsCommon.DevfileComponentType + components []common.DevfileComponent componentName string getComponentName string want bool }{ { - name: "Case 1: Valid component name", - client: fakeClient, - componentType: versionsCommon.DevfileComponentTypeDockerimage, + name: "Case 1: Valid component name", + client: fakeClient, + components: []common.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + testingutil.GetFakeComponent("alias2"), + }, 
componentName: "golang", getComponentName: "golang", want: true, }, { - name: "Case 2: Non-existent component name", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeClient, - componentName: "test-name", + name: "Case 2: Non-existent component name", + client: fakeClient, + components: []common.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + }, + componentName: "test", getComponentName: "fake-component", want: false, }, { - name: "Case 3: Docker client error", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeErrorClient, - componentName: "test-name", + name: "Case 3: Docker client error", + client: fakeErrorClient, + components: []common.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + }, + componentName: "test", getComponentName: "fake-component", want: false, }, @@ -150,7 +169,7 @@ func TestDoesComponentExist(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: tt.components, }, } @@ -222,7 +241,7 @@ func TestAdapterDelete(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: "nodejs", + Components: []versionsCommon.DevfileComponent{}, }, } @@ -552,6 +571,44 @@ func TestAdapterDeleteVolumes(t *testing.T) { }, expectToDelete: []string{}, }, + { + name: "Case 7: Should delete both storage and supervisord mount", + containers: []types.Container{ + containerWithMount(componentName, + []types.MountPoint{ + { + Name: "my-supervisord-mount", + Type: mount.TypeVolume, + }, + { + Name: "my-storage-mount", + Type: mount.TypeVolume, + }, + }), + }, + volumes: []*types.Volume{ + { + Name: "my-supervisord-mount", + Labels: map[string]string{ + "component": componentName, + "type": "supervisord", + "image": "supervisordimage", + "version": "supervisordversion", + }, + }, + { + Name: "my-storage-mount", + Labels: 
map[string]string{ + "component": componentName, + "storage-name": "anyval", + }, + }, + }, + expectToDelete: []string{ + "my-supervisord-mount", + "my-storage-mount", + }, + }, } for _, tt := range tests { @@ -561,7 +618,7 @@ func TestAdapterDeleteVolumes(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: "nodejs", + Components: []versionsCommon.DevfileComponent{}, }, } diff --git a/pkg/devfile/adapters/docker/component/utils.go b/pkg/devfile/adapters/docker/component/utils.go index f250ec3b564..42087e13ebc 100644 --- a/pkg/devfile/adapters/docker/component/utils.go +++ b/pkg/devfile/adapters/docker/component/utils.go @@ -22,10 +22,13 @@ import ( "github.com/openshift/odo/pkg/exec" "github.com/openshift/odo/pkg/lclient" "github.com/openshift/odo/pkg/log" + "github.com/openshift/odo/pkg/machineoutput" ) const ( - LocalhostIP = "127.0.0.1" + // LocalhostIP is the IP address for localhost + LocalhostIP = "127.0.0.1" + projectSourceVolumeName = "odo-project-source" ) @@ -34,103 +37,68 @@ func (a Adapter) createComponent() (err error) { log.Infof("\nCreating Docker resources for component %s", a.ComponentName) - // Get or create the project source volume - var projectVolumeName string - projectVolumeLabels := utils.GetProjectVolumeLabels(componentName) - projectVols, err := a.Client.GetVolumesByLabel(projectVolumeLabels) - if err != nil { - return errors.Wrapf(err, "Unable to retrieve source volume for component "+componentName) - } - if len(projectVols) == 0 { - // A source volume needs to be created - projectVolumeName, err = storage.GenerateVolName(projectSourceVolumeName, a.ComponentName) - if err != nil { - return errors.Wrapf(err, "Unable to generate project source volume name for component %s", componentName) - } - _, err := a.Client.CreateVolume(projectVolumeName, projectVolumeLabels) - if err != nil { - return errors.Wrapf(err, "Unable to create project source volume for component %s", componentName) - } - } 
else if len(projectVols) == 1 { - projectVolumeName = projectVols[0].Name - } else if len(projectVols) > 1 { - return errors.Wrapf(err, "Error, multiple source volumes found for component %s", componentName) - } - supportedComponents := common.GetSupportedComponents(a.Devfile.Data) if len(supportedComponents) == 0 { - return fmt.Errorf("No valid components found in the devfile") + return fmt.Errorf("no valid components found in the devfile") } // Get the storage adapter and create the volumes if it does not exist stoAdapter := storage.New(a.AdapterContext, a.Client) err = stoAdapter.Create(a.uniqueStorage) if err != nil { - return errors.Wrapf(err, "Unable to create Docker storage adapter for component %s", componentName) + return errors.Wrapf(err, "unable to create Docker storage adapter for component %s", componentName) } // Loop over each component and start a container for it for _, comp := range supportedComponents { var dockerVolumeMounts []mount.Mount - for _, vol := range a.componentAliasToVolumes[*comp.Alias] { + for _, vol := range a.componentAliasToVolumes[comp.Container.Name] { + volMount := mount.Mount{ Type: mount.TypeVolume, - Source: a.volumeNameToDockerVolName[*vol.Name], - Target: *vol.ContainerPath, + Source: a.volumeNameToDockerVolName[vol.Name], + Target: vol.ContainerPath, } dockerVolumeMounts = append(dockerVolumeMounts, volMount) } - err = a.pullAndStartContainer(dockerVolumeMounts, projectVolumeName, comp) + err = a.pullAndStartContainer(dockerVolumeMounts, comp) if err != nil { - return errors.Wrapf(err, "unable to pull and start container %s for component %s", *comp.Alias, componentName) + return errors.Wrapf(err, "unable to pull and start container %s for component %s", comp.Container.Name, componentName) } } - klog.V(3).Infof("Successfully created all containers for component %s", componentName) + klog.V(4).Infof("Successfully created all containers for component %s", componentName) return nil } func (a Adapter) updateComponent() 
(componentExists bool, err error) { - klog.V(3).Info("The component already exists, attempting to update it") + klog.V(4).Info("The component already exists, attempting to update it") componentExists = true componentName := a.ComponentName - // Get the project source volume - volumeLabels := utils.GetProjectVolumeLabels(componentName) - projectVols, err := a.Client.GetVolumesByLabel(volumeLabels) - if err != nil { - return componentExists, errors.Wrapf(err, "Unable to retrieve source volume for component "+componentName) - } - if len(projectVols) == 0 { - return componentExists, fmt.Errorf("Unable to find source volume for component %s", componentName) - } else if len(projectVols) > 1 { - return componentExists, errors.Wrapf(err, "Error, multiple source volumes found for component %s", componentName) - } - projectVolumeName := projectVols[0].Name - // Get the storage adapter and create the volumes if it does not exist stoAdapter := storage.New(a.AdapterContext, a.Client) err = stoAdapter.Create(a.uniqueStorage) supportedComponents := common.GetSupportedComponents(a.Devfile.Data) if len(supportedComponents) == 0 { - return componentExists, fmt.Errorf("No valid components found in the devfile") + return componentExists, fmt.Errorf("no valid components found in the devfile") } for _, comp := range supportedComponents { // Check to see if this component is already running and if so, update it // If component isn't running, re-create it, as it either may be new, or crashed. 
- containers, err := a.Client.GetContainersByComponentAndAlias(componentName, *comp.Alias) + containers, err := a.Client.GetContainersByComponentAndAlias(componentName, comp.Container.Name) if err != nil { return false, errors.Wrapf(err, "unable to list containers for component %s", componentName) } var dockerVolumeMounts []mount.Mount - for _, vol := range a.componentAliasToVolumes[*comp.Alias] { + for _, vol := range a.componentAliasToVolumes[comp.Container.Name] { volMount := mount.Mount{ Type: mount.TypeVolume, - Source: a.volumeNameToDockerVolName[*vol.Name], - Target: *vol.ContainerPath, + Source: a.volumeNameToDockerVolName[vol.Name], + Target: vol.ContainerPath, } dockerVolumeMounts = append(dockerVolumeMounts, volMount) } @@ -138,9 +106,9 @@ func (a Adapter) updateComponent() (componentExists bool, err error) { log.Infof("\nCreating Docker resources for component %s", a.ComponentName) // Container doesn't exist, so need to pull its image (to be safe) and start a new container - err = a.pullAndStartContainer(dockerVolumeMounts, projectVolumeName, comp) + err = a.pullAndStartContainer(dockerVolumeMounts, comp) if err != nil { - return false, errors.Wrapf(err, "unable to pull and start container %s for component %s", *comp.Alias, componentName) + return false, errors.Wrapf(err, "unable to pull and start container %s for component %s", comp.Container.Name, componentName) } // Update componentExists so that we re-sync project and initialize supervisord if required @@ -155,7 +123,7 @@ func (a Adapter) updateComponent() (componentExists bool, err error) { return componentExists, errors.Wrapf(err, "unable to get the container config for component %s", componentName) } - portMap, namePortMapping, err := getPortMap(a.Context, comp.Endpoints, false) + portMap, namePortMapping, err := getPortMap(a.Context, comp.Container.Endpoints, false) if err != nil { return componentExists, errors.Wrapf(err, "unable to get the port map from env.yaml file for component %s", 
componentName) } @@ -167,22 +135,22 @@ func (a Adapter) updateComponent() (componentExists bool, err error) { if utils.DoesContainerNeedUpdating(comp, containerConfig, hostConfig, dockerVolumeMounts, mounts, portMap) { log.Infof("\nCreating Docker resources for component %s", a.ComponentName) - s := log.SpinnerNoSpin("Updating the component " + *comp.Alias) + s := log.SpinnerNoSpin("Updating the component " + comp.Container.Name) defer s.End(false) // Remove the container err := a.Client.RemoveContainer(containerID) if err != nil { - return componentExists, errors.Wrapf(err, "Unable to remove container %s for component %s", containerID, *comp.Alias) + return componentExists, errors.Wrapf(err, "unable to remove container %s for component %s", containerID, comp.Container.Name) } // Start the container - err = a.startComponent(dockerVolumeMounts, projectVolumeName, comp) + err = a.startComponent(dockerVolumeMounts, comp) if err != nil { - return false, errors.Wrapf(err, "Unable to start container for devfile component %s", *comp.Alias) + return false, errors.Wrapf(err, "unable to start container for devfile component %s", comp.Container.Name) } - klog.V(3).Infof("Successfully created container %s for component %s", *comp.Image, componentName) + klog.V(4).Infof("Successfully created container %s for component %s", comp.Container.Image, componentName) s.End(true) // Update componentExists so that we re-sync project and initialize supervisord if required @@ -191,36 +159,36 @@ func (a Adapter) updateComponent() (componentExists bool, err error) { } else { // Multiple containers were returned with the specified label (which should be unique) // Error out, as this isn't expected - return true, fmt.Errorf("Found multiple running containers for devfile component %s and cannot push changes", *comp.Alias) + return true, fmt.Errorf("found multiple running containers for devfile component %s and cannot push changes", comp.Container.Name) } } return } -func (a Adapter) 
pullAndStartContainer(mounts []mount.Mount, projectVolumeName string, comp versionsCommon.DevfileComponent) error { +func (a Adapter) pullAndStartContainer(mounts []mount.Mount, comp versionsCommon.DevfileComponent) error { // Container doesn't exist, so need to pull its image (to be safe) and start a new container - s := log.Spinnerf("Pulling image %s", *comp.Image) + s := log.Spinnerf("Pulling image %s", comp.Container.Image) - err := a.Client.PullImage(*comp.Image) + err := a.Client.PullImage(comp.Container.Image) if err != nil { s.End(false) - return errors.Wrapf(err, "Unable to pull %s image", *comp.Image) + return errors.Wrapf(err, "Unable to pull %s image", comp.Container.Image) } s.End(true) // Start the component container - err = a.startComponent(mounts, projectVolumeName, comp) + err = a.startComponent(mounts, comp) if err != nil { - return errors.Wrapf(err, "Unable to start container for devfile component %s", *comp.Alias) + return errors.Wrapf(err, "unable to start container for devfile component %s", comp.Container.Name) } - klog.V(3).Infof("Successfully created container %s for component %s", *comp.Image, a.ComponentName) + klog.V(4).Infof("Successfully created container %s for component %s", comp.Container.Image, a.ComponentName) return nil } -func (a Adapter) startComponent(mounts []mount.Mount, projectVolumeName string, comp versionsCommon.DevfileComponent) error { - hostConfig, namePortMapping, err := a.generateAndGetHostConfig(comp.Endpoints) +func (a Adapter) startComponent(mounts []mount.Mount, comp versionsCommon.DevfileComponent) error { + hostConfig, namePortMapping, err := a.generateAndGetHostConfig(comp.Container.Endpoints) hostConfig.Mounts = mounts if err != nil { return err @@ -231,18 +199,18 @@ func (a Adapter) startComponent(mounts []mount.Mount, projectVolumeName string, if err != nil { return err } - utils.UpdateComponentWithSupervisord(&comp, runCommand, a.supervisordVolumeName, &hostConfig) + updateComponentWithSupervisord(&comp, 
runCommand, a.supervisordVolumeName, &hostConfig) // If the component set `mountSources` to true, add the source volume and env CHE_PROJECTS_ROOT to it - if comp.MountSources { - utils.AddVolumeToContainer(projectVolumeName, lclient.OdoSourceVolumeMount, &hostConfig) + if comp.Container.MountSources { + utils.AddVolumeToContainer(a.projectVolumeName, lclient.OdoSourceVolumeMount, &hostConfig) - if !common.IsEnvPresent(comp.Env, common.EnvCheProjectsRoot) { + if !common.IsEnvPresent(comp.Container.Env, common.EnvCheProjectsRoot) { envName := common.EnvCheProjectsRoot envValue := lclient.OdoSourceVolumeMount - comp.Env = append(comp.Env, versionsCommon.DockerimageEnv{ - Name: &envName, - Value: &envValue, + comp.Container.Env = append(comp.Container.Env, versionsCommon.Env{ + Name: envName, + Value: envValue, }) } } @@ -254,9 +222,9 @@ func (a Adapter) startComponent(mounts []mount.Mount, projectVolumeName string, } // Create the docker container - s := log.Spinner("Starting container for " + *comp.Image) + s := log.Spinner("Starting container for " + comp.Container.Image) defer s.End(false) - err = a.Client.StartContainer(&containerConfig, &hostConfig, nil) + _, err = a.Client.StartContainer(&containerConfig, &hostConfig, nil) if err != nil { return err } @@ -267,16 +235,15 @@ func (a Adapter) startComponent(mounts []mount.Mount, projectVolumeName string, func (a Adapter) generateAndGetContainerConfig(componentName string, comp versionsCommon.DevfileComponent) container.Config { // Convert the env vars in the Devfile to the format expected by Docker - envVars := utils.ConvertEnvs(comp.Env) - ports := utils.ConvertPorts(comp.Endpoints) - containerLabels := utils.GetContainerLabels(componentName, *comp.Alias) - - containerConfig := a.Client.GenerateContainerConfig(*comp.Image, comp.Command, comp.Args, envVars, containerLabels, ports) + envVars := utils.ConvertEnvs(comp.Container.Env) + ports := utils.ConvertPorts(comp.Container.Endpoints) + containerLabels := 
utils.GetContainerLabels(componentName, comp.Container.Name) + containerConfig := a.Client.GenerateContainerConfig(comp.Container.Image, comp.Container.Command, comp.Container.Args, envVars, containerLabels, ports) return containerConfig } -func (a Adapter) generateAndGetHostConfig(endpoints []versionsCommon.DockerimageEndpoint) (container.HostConfig, map[nat.Port]string, error) { +func (a Adapter) generateAndGetHostConfig(endpoints []versionsCommon.Endpoint) (container.HostConfig, map[nat.Port]string, error) { // Convert the port bindings from env.yaml and generate docker host config portMap, namePortMapping, err := getPortMap(a.Context, endpoints, true) if err != nil { @@ -291,7 +258,7 @@ func (a Adapter) generateAndGetHostConfig(endpoints []versionsCommon.Dockerimage return hostConfig, namePortMapping, nil } -func getPortMap(context string, endpoints []versionsCommon.DockerimageEndpoint, show bool) (nat.PortMap, map[nat.Port]string, error) { +func getPortMap(context string, endpoints []versionsCommon.Endpoint, show bool) (nat.PortMap, map[nat.Port]string, error) { // Convert the exposed and internal port pairs saved in env.yaml file to PortMap // Todo: Use context to get the approraite envinfo after context is supported in experimental mode portmap := nat.PortMap{} @@ -335,7 +302,7 @@ func getPortMap(context string, endpoints []versionsCommon.DockerimageEndpoint, log.Successf("URL %v:%v created", LocalhostIP, url.ExposedPort) } } else if url.ExposedPort > 0 && len(endpoints) > 0 && !common.IsPortPresent(endpoints, url.Port) { - return nil, nil, fmt.Errorf("Error creating url: odo url config's port is not present in the devfile. Please re-create odo url with the new devfile port") + return nil, nil, fmt.Errorf("error creating url: odo url config's port is not present in the devfile. 
Please re-create odo url with the new devfile port") } } @@ -344,100 +311,248 @@ func getPortMap(context string, endpoints []versionsCommon.DockerimageEndpoint, // Executes all the commands from the devfile in order: init and build - which are both optional, and a compulsary run. // Init only runs once when the component is created. -func (a Adapter) execDevfile(pushDevfileCommands []versionsCommon.DevfileCommand, componentExists, show bool, containers []types.Container) (err error) { +func (a Adapter) execDevfile(commandsMap common.PushCommandsMap, componentExists, show bool, containers []types.Container) (err error) { + // If nothing has been passed, then the devfile is missing the required run command - if len(pushDevfileCommands) == 0 { + if len(commandsMap) == 0 { return errors.New(fmt.Sprint("error executing devfile commands - there should be at least 1 command")) } - commandOrder := []common.CommandNames{} - // Only add runinit to the expected commands if the component doesn't already exist // This would be the case when first running the container if !componentExists { - commandOrder = append(commandOrder, common.CommandNames{DefaultName: string(common.DefaultDevfileInitCommand), AdapterName: a.devfileInitCmd}) - } - commandOrder = append( - commandOrder, - common.CommandNames{DefaultName: string(common.DefaultDevfileBuildCommand), AdapterName: a.devfileBuildCmd}, - common.CommandNames{DefaultName: string(common.DefaultDevfileRunCommand), AdapterName: a.devfileRunCmd}, - ) - - // Loop through each of the expected commands in the devfile - for i, currentCommand := range commandOrder { - // Loop through each of the command given from the devfile - for _, command := range pushDevfileCommands { - // If the current command from the devfile is the currently expected command from the devfile - if command.Name == currentCommand.DefaultName || command.Name == currentCommand.AdapterName { - // If the current command is not the last command in the slice - // it is not 
expected to be the run command - if i < len(commandOrder)-1 { - // Any exec command such as "Init" and "Build" - - for _, action := range command.Actions { - containerID := utils.GetContainerIDForAlias(containers, *action.Component) - compInfo := common.ComponentInfo{ - ContainerName: containerID, - } - - err = exec.ExecuteDevfileBuildAction(&a.Client, action, command.Name, compInfo, show) - if err != nil { - return err - } - } - - // If the current command is the last command in the slice - // it is expected to be the run command - } else { - // Last command is "Run" - klog.V(4).Infof("Executing devfile command %v", command.Name) - - for _, action := range command.Actions { - - // Check if the devfile run component containers have supervisord as the entrypoint. - // Start the supervisord if the odo component does not exist - if !componentExists { - err = a.InitRunContainerSupervisord(*action.Component, containers) - if err != nil { - return - } - } - - containerID := utils.GetContainerIDForAlias(containers, *action.Component) - compInfo := common.ComponentInfo{ - ContainerName: containerID, - } - - if componentExists && !common.IsRestartRequired(command) { - klog.V(4).Info("restart:false, Not restarting DevRun Command") - err = exec.ExecuteDevfileRunActionWithoutRestart(&a.Client, action, command.Name, compInfo, show) - return - } - - err = exec.ExecuteDevfileRunAction(&a.Client, action, command.Name, compInfo, show) - - } - } + // Get Init Command + command, ok := commandsMap[versionsCommon.InitCommandGroupType] + if ok { + + containerID := utils.GetContainerIDForAlias(containers, command.Exec.Component) + compInfo := common.ComponentInfo{ContainerName: containerID} + err = exec.ExecuteDevfileBuildAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + if err != nil { + return err + } + } + } + + // Get Build Command + command, ok := commandsMap[versionsCommon.BuildCommandGroupType] + if ok { + containerID := 
utils.GetContainerIDForAlias(containers, command.Exec.Component) + compInfo := common.ComponentInfo{ContainerName: containerID} + err = exec.ExecuteDevfileBuildAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + if err != nil { + return err + } + } + + // Get Run command + command, ok = commandsMap[versionsCommon.RunCommandGroupType] + if ok { + klog.V(4).Infof("Executing devfile command %v", command.Exec.Id) + // Check if the devfile run component containers have supervisord as the entrypoint. + // Start the supervisord if the odo component does not exist + if !componentExists { + err = a.initRunContainerSupervisord(command.Exec.Component, containers) + if err != nil { + a.machineEventLogger.ReportError(err, machineoutput.TimestampNow()) + return } } + + containerID := utils.GetContainerIDForAlias(containers, command.Exec.Component) + compInfo := common.ComponentInfo{ContainerName: containerID} + if componentExists && !common.IsRestartRequired(command) { + klog.V(4).Info("restart:false, Not restarting DevRun Command") + err = exec.ExecuteDevfileRunActionWithoutRestart(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + return + } + err = exec.ExecuteDevfileRunAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) } return } -// InitRunContainerSupervisord initializes the supervisord in the container if +// initRunContainerSupervisord initializes the supervisord in the container if // the container has entrypoint that is not supervisord -func (a Adapter) InitRunContainerSupervisord(component string, containers []types.Container) (err error) { +func (a Adapter) initRunContainerSupervisord(component string, containers []types.Container) (err error) { for _, container := range containers { if container.Labels["alias"] == component && !strings.Contains(container.Command, common.SupervisordBinaryPath) { command := []string{common.SupervisordBinaryPath, "-c", 
common.SupervisordConfFile, "-d"} compInfo := common.ComponentInfo{ ContainerName: container.ID, } - err = exec.ExecuteCommand(&a.Client, compInfo, command, true) + err = exec.ExecuteCommand(&a.Client, compInfo, command, true, nil, nil) } } return } + +// createProjectVolumeIfReqd creates a project volume if absent and returns the +// name of the created project volume +func (a Adapter) createProjectVolumeIfReqd() (string, error) { + var projectVolumeName string + componentName := a.ComponentName + + // Get the project source volume + projectVolumeLabels := utils.GetProjectVolumeLabels(componentName) + projectVols, err := a.Client.GetVolumesByLabel(projectVolumeLabels) + if err != nil { + return "", errors.Wrapf(err, "unable to retrieve source volume for component "+componentName) + } + + if len(projectVols) == 0 { + // A source volume needs to be created + projectVolumeName, err = storage.GenerateVolName(projectSourceVolumeName, componentName) + if err != nil { + return "", errors.Wrapf(err, "unable to generate project source volume name for component %s", componentName) + } + _, err := a.Client.CreateVolume(projectVolumeName, projectVolumeLabels) + if err != nil { + return "", errors.Wrapf(err, "unable to create project source volume for component %s", componentName) + } + } else if len(projectVols) == 1 { + projectVolumeName = projectVols[0].Name + } else if len(projectVols) > 1 { + return "", errors.New(fmt.Sprintf("multiple source volumes found for component %s", componentName)) + } + + return projectVolumeName, nil +} + +// createAndInitSupervisordVolumeIfReqd creates the supervisord volume and initializes +// it with supervisord bootstrap image - assembly files and supervisord binary +// returns the name of the supervisord volume and an error if present +func (a Adapter) createAndInitSupervisordVolumeIfReqd(componentExists bool) (string, error) { + var supervisordVolumeName string + componentName := a.ComponentName + + supervisordLabels := 
utils.GetSupervisordVolumeLabels(componentName) + supervisordVolumes, err := a.Client.GetVolumesByLabel(supervisordLabels) + if err != nil { + return "", errors.Wrapf(err, "unable to retrieve supervisord volume for component") + } + + if len(supervisordVolumes) == 0 { + supervisordVolumeName, err = storage.GenerateVolName(common.SupervisordVolumeName, componentName) + if err != nil { + return "", errors.Wrapf(err, "unable to generate volume name for supervisord") + } + _, err := a.Client.CreateVolume(supervisordVolumeName, supervisordLabels) + if err != nil { + return "", errors.Wrapf(err, "unable to create supervisord volume for component") + } + } else { + supervisordVolumeName = supervisordVolumes[0].Name + } + + if !componentExists { + log.Info("\nInitialization") + s := log.Spinner("Initializing the component") + defer s.End(false) + + err = a.startBootstrapSupervisordInitContainer(supervisordVolumeName) + if err != nil { + return "", errors.Wrapf(err, "unable to start supervisord container for component") + } + + s.End(true) + } + + return supervisordVolumeName, nil +} + +// startBootstrapSupervisordInitContainer pulls the supervisord bootstrap image, mounts the supervisord +// volume, starts the bootstrap container and initializes the supervisord volume via its entrypoint +func (a Adapter) startBootstrapSupervisordInitContainer(supervisordVolumeName string) error { + componentName := a.ComponentName + supervisordLabels := utils.GetSupervisordVolumeLabels(componentName) + image := common.GetBootstrapperImage() + command := []string{"/usr/bin/cp"} + args := []string{ + "-r", + common.OdoInitImageContents, + common.SupervisordMountPath, + } + + var s *log.Status + if log.IsDebug() { + s = log.Spinnerf("Pulling image %s", image) + defer s.End(false) + } + + err := a.Client.PullImage(image) + if err != nil { + return errors.Wrapf(err, "unable to pull %s image", image) + } + if log.IsDebug() { + s.End(true) + } + + containerConfig := 
a.Client.GenerateContainerConfig(image, command, args, nil, supervisordLabels, nil) + hostConfig := container.HostConfig{} + + utils.AddVolumeToContainer(supervisordVolumeName, common.SupervisordMountPath, &hostConfig) + + // Create the docker container + if log.IsDebug() { + s = log.Spinnerf("Starting container for %s", image) + defer s.End(false) + } + containerID, err := a.Client.StartContainer(&containerConfig, &hostConfig, nil) + if err != nil { + return err + } + if log.IsDebug() { + s.End(true) + } + + // Wait for the container to exit before removing it + err = a.Client.WaitForContainer(containerID, container.WaitConditionNotRunning) + if err != nil { + return errors.Wrapf(err, "supervisord init container %s failed to complete", containerID) + } + + err = a.Client.RemoveContainer(containerID) + if err != nil { + return errors.Wrapf(err, "unable to remove supervisord init container %s", containerID) + } + + return nil +} + +// UpdateComponentWithSupervisord updates the devfile component's +// 1. command and args with supervisord, if absent +// 2. 
env with ODO_COMMAND_RUN and ODO_COMMAND_RUN_WORKING_DIR, if absent +func updateComponentWithSupervisord(comp *versionsCommon.DevfileComponent, runCommand versionsCommon.DevfileCommand, supervisordVolumeName string, hostConfig *container.HostConfig) { + + // Mount the supervisord volume for the run command container + if runCommand.Exec.Component == comp.Container.Name { + utils.AddVolumeToContainer(supervisordVolumeName, common.SupervisordMountPath, hostConfig) + + if len(comp.Container.Command) == 0 && len(comp.Container.Args) == 0 { + klog.V(4).Infof("Updating container %v entrypoint with supervisord", comp.Container.Name) + comp.Container.Command = append(comp.Container.Command, common.SupervisordBinaryPath) + comp.Container.Args = append(comp.Container.Args, "-c", common.SupervisordConfFile) + } + + if !common.IsEnvPresent(comp.Container.Env, common.EnvOdoCommandRun) { + envName := common.EnvOdoCommandRun + envValue := runCommand.Exec.CommandLine + comp.Container.Env = append(comp.Container.Env, versionsCommon.Env{ + Name: envName, + Value: envValue, + }) + } + + if !common.IsEnvPresent(comp.Container.Env, common.EnvOdoCommandRunWorkingDir) && runCommand.Exec.WorkingDir != "" { + envName := common.EnvOdoCommandRunWorkingDir + envValue := runCommand.Exec.WorkingDir + comp.Container.Env = append(comp.Container.Env, versionsCommon.Env{ + Name: envName, + Value: envValue, + }) + } + } +} diff --git a/pkg/devfile/adapters/docker/component/utils_test.go b/pkg/devfile/adapters/docker/component/utils_test.go index ba69a418d7e..91292808231 100644 --- a/pkg/devfile/adapters/docker/component/utils_test.go +++ b/pkg/devfile/adapters/docker/component/utils_test.go @@ -1,11 +1,14 @@ package component import ( + "reflect" + "strings" "testing" "github.com/docker/go-connections/nat" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" adaptersCommon 
"github.com/openshift/odo/pkg/devfile/adapters/common" devfileParser "github.com/openshift/odo/pkg/devfile/parser" @@ -22,35 +25,36 @@ func TestCreateComponent(t *testing.T) { fakeErrorClient := lclient.FakeErrorNew() tests := []struct { - name string - componentType versionsCommon.DevfileComponentType - client *lclient.Client - wantErr bool + name string + components []versionsCommon.DevfileComponent + client *lclient.Client + wantErr bool }{ { - name: "Case 1: Invalid devfile", - componentType: "", - client: fakeClient, - wantErr: true, + name: "Case 1: Invalid devfile", + components: []versionsCommon.DevfileComponent{}, + client: fakeClient, + wantErr: true, }, { - name: "Case 2: Valid devfile", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeClient, - wantErr: false, + name: "Case 2: Valid devfile", + components: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("alias1")}, + client: fakeClient, + wantErr: false, }, { - name: "Case 3: Valid devfile, docker client error", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeErrorClient, - wantErr: true, + name: "Case 3: Valid devfile, docker client error", + components: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("alias1")}, + client: fakeErrorClient, + wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + ExecCommands: testingutil.GetFakeExecRunCommands(), + Components: tt.components, }, } @@ -78,45 +82,53 @@ func TestUpdateComponent(t *testing.T) { tests := []struct { name string - componentType versionsCommon.DevfileComponentType + components []versionsCommon.DevfileComponent componentName string client *lclient.Client wantErr bool }{ { name: "Case 1: Invalid devfile", - componentType: "", + components: []versionsCommon.DevfileComponent{}, componentName: "", client: fakeClient, wantErr: 
true, }, { name: "Case 2: Valid devfile", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + components: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("alias1")}, componentName: "test", client: fakeClient, wantErr: false, }, { name: "Case 3: Valid devfile, docker client error", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + components: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("alias1")}, componentName: "", client: fakeErrorClient, wantErr: true, }, { - name: "Case 3: Valid devfile, missing component", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + name: "Case 4: Valid devfile, missing component", + components: []versionsCommon.DevfileComponent{ + { + Container: &versionsCommon.Container{ + Name: "alias1", + Image: "someimage", + }, + }, + }, componentName: "fakecomponent", client: fakeClient, - wantErr: true, + wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: tt.components, + ExecCommands: testingutil.GetFakeExecRunCommands(), }, } @@ -154,21 +166,21 @@ func TestPullAndStartContainer(t *testing.T) { }{ { name: "Case 1: Successfully start container, no mount", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, client: fakeClient, mounts: []mount.Mount{}, wantErr: false, }, { name: "Case 2: Docker client error", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, client: fakeErrorClient, mounts: []mount.Mount{}, wantErr: true, }, { name: "Case 3: Successfully start container, one mount", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, client: fakeClient, mounts: []mount.Mount{ { @@ -180,7 +192,7 @@ func 
TestPullAndStartContainer(t *testing.T) { }, { name: "Case 4: Successfully start container, multiple mounts", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, client: fakeClient, mounts: []mount.Mount{ { @@ -199,7 +211,10 @@ func TestPullAndStartContainer(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: []versionsCommon.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + }, + ExecCommands: testingutil.GetFakeExecRunCommands(), }, } @@ -209,7 +224,8 @@ func TestPullAndStartContainer(t *testing.T) { } componentAdapter := New(adapterCtx, *tt.client) - err := componentAdapter.pullAndStartContainer(tt.mounts, testVolumeName, adapterCtx.Devfile.Data.GetAliasedComponents()[0]) + componentAdapter.projectVolumeName = testVolumeName + err := componentAdapter.pullAndStartContainer(tt.mounts, adapterCtx.Devfile.Data.GetAliasedComponents()[0]) // Checks for unexpected error cases if !tt.wantErr == (err != nil) { @@ -229,30 +245,26 @@ func TestStartContainer(t *testing.T) { fakeErrorClient := lclient.FakeErrorNew() tests := []struct { - name string - componentType versionsCommon.DevfileComponentType - client *lclient.Client - mounts []mount.Mount - wantErr bool + name string + client *lclient.Client + mounts []mount.Mount + wantErr bool }{ { - name: "Case 1: Successfully start container, no mount", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeClient, - mounts: []mount.Mount{}, - wantErr: false, + name: "Case 1: Successfully start container, no mount", + client: fakeClient, + mounts: []mount.Mount{}, + wantErr: false, }, { - name: "Case 2: Docker client error", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeErrorClient, - mounts: []mount.Mount{}, - wantErr: true, + name: "Case 2: Docker client error", + client: 
fakeErrorClient, + mounts: []mount.Mount{}, + wantErr: true, }, { - name: "Case 3: Successfully start container, one mount", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeClient, + name: "Case 3: Successfully start container, one mount", + client: fakeClient, mounts: []mount.Mount{ { Source: "test-vol", @@ -262,9 +274,8 @@ func TestStartContainer(t *testing.T) { wantErr: false, }, { - name: "Case 4: Successfully start container, multiple mount", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - client: fakeClient, + name: "Case 4: Successfully start container, multiple mount", + client: fakeClient, mounts: []mount.Mount{ { Source: "test-vol", @@ -282,7 +293,10 @@ func TestStartContainer(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: []versionsCommon.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + }, + ExecCommands: testingutil.GetFakeExecRunCommands(), }, } @@ -292,7 +306,8 @@ func TestStartContainer(t *testing.T) { } componentAdapter := New(adapterCtx, *tt.client) - err := componentAdapter.startComponent(tt.mounts, testVolumeName, adapterCtx.Devfile.Data.GetAliasedComponents()[0]) + componentAdapter.projectVolumeName = testVolumeName + err := componentAdapter.startComponent(tt.mounts, adapterCtx.Devfile.Data.GetAliasedComponents()[0]) // Checks for unexpected error cases if !tt.wantErr == (err != nil) { @@ -306,7 +321,6 @@ func TestStartContainer(t *testing.T) { func TestGenerateAndGetHostConfig(t *testing.T) { fakeClient := lclient.FakeNew() testComponentName := "test" - componentType := versionsCommon.DevfileComponentTypeDockerimage endpointName := []string{"8080/tcp", "9090/tcp", "9080/tcp"} var endpointPort = []int32{8080, 9090, 9080} @@ -321,14 +335,14 @@ func TestGenerateAndGetHostConfig(t *testing.T) { urlValue []envinfo.EnvInfoURL expectResult nat.PortMap client 
*lclient.Client - endpoints []versionsCommon.DockerimageEndpoint + endpoints []versionsCommon.Endpoint }{ { name: "Case 1: no port mappings", urlValue: []envinfo.EnvInfoURL{}, expectResult: nil, client: fakeClient, - endpoints: []versionsCommon.DockerimageEndpoint{}, + endpoints: []versionsCommon.Endpoint{}, }, { name: "Case 2: only one port mapping", @@ -344,10 +358,10 @@ func TestGenerateAndGetHostConfig(t *testing.T) { }, }, client: fakeClient, - endpoints: []versionsCommon.DockerimageEndpoint{ + endpoints: []versionsCommon.Endpoint{ { - Name: &endpointName[0], - Port: &endpointPort[0], + Name: endpointName[0], + TargetPort: endpointPort[0], }, }, }, @@ -379,18 +393,18 @@ func TestGenerateAndGetHostConfig(t *testing.T) { }, }, client: fakeClient, - endpoints: []versionsCommon.DockerimageEndpoint{ + endpoints: []versionsCommon.Endpoint{ { - Name: &endpointName[0], - Port: &endpointPort[0], + Name: endpointName[0], + TargetPort: endpointPort[0], }, { - Name: &endpointName[1], - Port: &endpointPort[1], + Name: endpointName[1], + TargetPort: endpointPort[1], }, { - Name: &endpointName[2], - Port: &endpointPort[2], + Name: endpointName[2], + TargetPort: endpointPort[2], }, }, }, @@ -400,7 +414,7 @@ func TestGenerateAndGetHostConfig(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: componentType, + Components: []versionsCommon.DevfileComponent{}, }, } @@ -453,11 +467,9 @@ func TestGenerateAndGetHostConfig(t *testing.T) { func TestExecDevfile(t *testing.T) { testComponentName := "test" - componentType := versionsCommon.DevfileComponentTypeDockerimage command := "ls -la" workDir := "/tmp" component := "alias1" - var actionType versionsCommon.DevfileCommandType = versionsCommon.DevfileCommandTypeExec containers := []types.Container{ { @@ -480,33 +492,31 @@ func TestExecDevfile(t *testing.T) { tests := []struct { name string client *lclient.Client - pushDevfileCommands []versionsCommon.DevfileCommand + 
pushDevfileCommands adaptersCommon.PushCommandsMap componentExists bool wantErr bool }{ { name: "Case 1: Successful devfile command exec of devbuild and devrun", client: fakeClient, - pushDevfileCommands: []versionsCommon.DevfileCommand{ - { - Name: "devrun", - Actions: []versionsCommon.DevfileCommandAction{ - { - Command: &command, - Workdir: &workDir, - Type: &actionType, - Component: &component, + pushDevfileCommands: adaptersCommon.PushCommandsMap{ + versionsCommon.RunCommandGroupType: versionsCommon.DevfileCommand{ + Exec: &versionsCommon.Exec{ + CommandLine: command, + WorkingDir: workDir, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, }, }, }, - { - Name: "devbuild", - Actions: []versionsCommon.DevfileCommandAction{ - { - Command: &command, - Workdir: &workDir, - Type: &actionType, - Component: &component, + versionsCommon.BuildCommandGroupType: versionsCommon.DevfileCommand{ + Exec: &versionsCommon.Exec{ + CommandLine: command, + WorkingDir: workDir, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.BuildCommandGroupType, }, }, }, @@ -517,15 +527,14 @@ func TestExecDevfile(t *testing.T) { { name: "Case 2: Successful devfile command exec of devrun", client: fakeClient, - pushDevfileCommands: []versionsCommon.DevfileCommand{ - { - Name: "devrun", - Actions: []versionsCommon.DevfileCommandAction{ - { - Command: &command, - Workdir: &workDir, - Type: &actionType, - Component: &component, + pushDevfileCommands: adaptersCommon.PushCommandsMap{ + versionsCommon.RunCommandGroupType: versionsCommon.DevfileCommand{ + Exec: &versionsCommon.Exec{ + CommandLine: command, + WorkingDir: workDir, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, }, }, }, @@ -536,22 +545,21 @@ func TestExecDevfile(t *testing.T) { { name: "Case 3: No devfile push commands should result in an err", client: fakeClient, - pushDevfileCommands: 
[]versionsCommon.DevfileCommand{}, + pushDevfileCommands: adaptersCommon.PushCommandsMap{}, componentExists: false, wantErr: true, }, { name: "Case 4: Unsuccessful devfile command exec of devrun", client: fakeErrorClient, - pushDevfileCommands: []versionsCommon.DevfileCommand{ - { - Name: "devrun", - Actions: []versionsCommon.DevfileCommandAction{ - { - Command: &command, - Workdir: &workDir, - Type: &actionType, - Component: &component, + pushDevfileCommands: adaptersCommon.PushCommandsMap{ + versionsCommon.RunCommandGroupType: versionsCommon.DevfileCommand{ + Exec: &versionsCommon.Exec{ + CommandLine: command, + WorkingDir: workDir, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, }, }, }, @@ -565,7 +573,7 @@ func TestExecDevfile(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: componentType, + Components: []versionsCommon.DevfileComponent{}, }, } @@ -586,7 +594,6 @@ func TestExecDevfile(t *testing.T) { func TestInitRunContainerSupervisord(t *testing.T) { testComponentName := "test" - componentType := versionsCommon.DevfileComponentTypeDockerimage containers := []types.Container{ { @@ -636,7 +643,7 @@ func TestInitRunContainerSupervisord(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: componentType, + Components: []versionsCommon.DevfileComponent{}, }, } @@ -646,10 +653,515 @@ func TestInitRunContainerSupervisord(t *testing.T) { } componentAdapter := New(adapterCtx, *tt.client) - err := componentAdapter.InitRunContainerSupervisord(tt.component, containers) + err := componentAdapter.initRunContainerSupervisord(tt.component, containers) if !tt.wantErr && err != nil { t.Errorf("TestInitRunContainerSupervisord error: unexpected error during init supervisord: %v", err) } }) } } + +func TestCreateProjectVolumeIfReqd(t *testing.T) { + fakeClient := lclient.FakeNew() + fakeErrorClient := lclient.FakeErrorNew() + 
+ tests := []struct { + name string + componentName string + client *lclient.Client + wantVolumeName string + wantErr bool + }{ + { + name: "Case 1: Volume does not exist", + componentName: "somecomponent", + client: fakeClient, + wantVolumeName: projectSourceVolumeName + "-somecomponent", + wantErr: false, + }, + { + name: "Case 2: Volume exist", + componentName: "test", + client: fakeClient, + wantVolumeName: projectSourceVolumeName + "-test", + wantErr: false, + }, + { + name: "Case 3: More than one project volume exist", + componentName: "duplicate", + client: fakeClient, + wantVolumeName: "", + wantErr: true, + }, + { + name: "Case 4: Client error", + componentName: "random", + client: fakeErrorClient, + wantVolumeName: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + Components: []versionsCommon.DevfileComponent{}, + }, + } + + adapterCtx := adaptersCommon.AdapterContext{ + ComponentName: tt.componentName, + Devfile: devObj, + } + + componentAdapter := New(adapterCtx, *tt.client) + volumeName, err := componentAdapter.createProjectVolumeIfReqd() + if !tt.wantErr && err != nil { + t.Errorf("TestCreateAndGetProjectVolume error: Unexpected error: %v", err) + } else if !tt.wantErr && !strings.Contains(volumeName, tt.wantVolumeName) { + t.Errorf("TestCreateAndGetProjectVolume error: project volume name did not match, expected: %v got: %v", tt.wantVolumeName, volumeName) + } + }) + } +} + +func TestStartBootstrapSupervisordInitContainer(t *testing.T) { + + supervisordVolumeName := "supervisord" + componentName := "myComponent" + + fakeClient := lclient.FakeNew() + fakeErrorClient := lclient.FakeErrorNew() + + tests := []struct { + name string + client *lclient.Client + wantErr bool + }{ + { + name: "Case 1: Successfully create a bootstrap container", + client: fakeClient, + wantErr: false, + }, + { + name: "Case 2: Failed to create a 
bootstrap container ", + client: fakeErrorClient, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + Components: []versionsCommon.DevfileComponent{}, + }, + } + + adapterCtx := adaptersCommon.AdapterContext{ + ComponentName: componentName, + Devfile: devObj, + } + + componentAdapter := New(adapterCtx, *tt.client) + err := componentAdapter.startBootstrapSupervisordInitContainer(supervisordVolumeName) + if !tt.wantErr && err != nil { + t.Errorf("TestStartBootstrapSupervisordInitContainer: unexpected error got: %v wanted: %v", err, tt.wantErr) + } + }) + } + +} + +func TestCreateAndInitSupervisordVolumeIfReqd(t *testing.T) { + + fakeClient := lclient.FakeNew() + fakeErrorClient := lclient.FakeErrorNew() + + componentName := "myComponent" + + tests := []struct { + name string + client *lclient.Client + wantErr bool + }{ + { + name: "Case 1: Successfully create a bootstrap vol and container", + client: fakeClient, + wantErr: false, + }, + { + name: "Case 2: Failed to create a bootstrap vol and container ", + client: fakeErrorClient, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + Components: []versionsCommon.DevfileComponent{}, + }, + } + + adapterCtx := adaptersCommon.AdapterContext{ + ComponentName: componentName, + Devfile: devObj, + } + + componentAdapter := New(adapterCtx, *tt.client) + volName, err := componentAdapter.createAndInitSupervisordVolumeIfReqd(false) + if !tt.wantErr && err != nil { + t.Errorf("TestCreateAndInitSupervisordVolume: unexpected error %v, wanted %v", err, tt.wantErr) + } else if !tt.wantErr && !strings.Contains(volName, adaptersCommon.SupervisordVolumeName+"-"+componentName) { + t.Errorf("TestCreateAndInitSupervisordVolume: unexpected supervisord vol name, expected: %v got: %v", 
adaptersCommon.SupervisordVolumeName, volName) + } + }) + } + +} + +func TestUpdateComponentWithSupervisord(t *testing.T) { + + command := "ls -la" + component := "alias1" + workDir := "/" + emptyString := "" + garbageString := "garbageString" + supervisordVolumeName := "supervisordVolumeName" + defaultWorkDirEnv := adaptersCommon.EnvOdoCommandRunWorkingDir + defaultCommandEnv := adaptersCommon.EnvOdoCommandRun + + tests := []struct { + name string + commandExecs []versionsCommon.Exec + commandName string + comp versionsCommon.DevfileComponent + supervisordVolumeName string + hostConfig container.HostConfig + wantHostConfig container.HostConfig + wantCommand []string + wantArgs []string + wantEnv []versionsCommon.Env + }{ + { + name: "Case 1: No component commands, args, env", + commandExecs: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, + }, + WorkingDir: workDir, + }, + }, + commandName: emptyString, + comp: versionsCommon.DevfileComponent{ + Container: &versionsCommon.Container{ + Command: []string{}, + Args: []string{}, + Env: []versionsCommon.Env{}, + Name: component, + }, + }, + supervisordVolumeName: supervisordVolumeName, + hostConfig: container.HostConfig{}, + wantHostConfig: container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: supervisordVolumeName, + Target: adaptersCommon.SupervisordMountPath, + }, + }, + }, + wantCommand: []string{adaptersCommon.SupervisordBinaryPath}, + wantArgs: []string{"-c", adaptersCommon.SupervisordConfFile}, + wantEnv: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: workDir, + }, + { + Name: defaultCommandEnv, + Value: command, + }, + }, + }, + { + name: "Case 2: Existing component command and no args, env", + commandExecs: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, + }, + 
WorkingDir: workDir, + }, + }, + commandName: emptyString, + comp: versionsCommon.DevfileComponent{ + Container: &versionsCommon.Container{ + Command: []string{"some", "command"}, + Args: []string{}, + Env: []versionsCommon.Env{}, + Name: component, + }, + }, + supervisordVolumeName: supervisordVolumeName, + hostConfig: container.HostConfig{}, + wantHostConfig: container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: supervisordVolumeName, + Target: adaptersCommon.SupervisordMountPath, + }, + }, + }, + wantCommand: []string{"some", "command"}, + wantArgs: []string{}, + wantEnv: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: workDir, + }, + { + Name: defaultCommandEnv, + Value: command, + }, + }, + }, + { + name: "Case 3: Existing component command and args and no env", + commandExecs: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, + }, + WorkingDir: workDir, + }, + }, + commandName: emptyString, + comp: versionsCommon.DevfileComponent{ + Container: &versionsCommon.Container{ + Command: []string{"some", "command"}, + Args: []string{"some", "args"}, + Env: []versionsCommon.Env{}, + Name: component, + }, + }, + supervisordVolumeName: supervisordVolumeName, + hostConfig: container.HostConfig{}, + wantHostConfig: container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: supervisordVolumeName, + Target: adaptersCommon.SupervisordMountPath, + }, + }, + }, + wantCommand: []string{"some", "command"}, + wantArgs: []string{"some", "args"}, + wantEnv: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: workDir, + }, + { + Name: defaultCommandEnv, + Value: command, + }, + }, + }, + { + name: "Case 4: Existing component command, args and env", + commandExecs: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + Group: &versionsCommon.Group{ + Kind: 
versionsCommon.RunCommandGroupType, + }, + WorkingDir: workDir, + }, + }, + commandName: emptyString, + comp: versionsCommon.DevfileComponent{ + Container: &versionsCommon.Container{ + Command: []string{"some", "command"}, + Args: []string{"some", "args"}, + Env: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: garbageString, + }, + { + Name: defaultCommandEnv, + Value: garbageString, + }, + }, + Name: component, + }, + }, + supervisordVolumeName: supervisordVolumeName, + hostConfig: container.HostConfig{}, + wantHostConfig: container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: supervisordVolumeName, + Target: adaptersCommon.SupervisordMountPath, + }, + }, + }, + wantCommand: []string{"some", "command"}, + wantArgs: []string{"some", "args"}, + wantEnv: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: garbageString, + }, + { + Name: defaultCommandEnv, + Value: garbageString, + }, + }, + }, + { + name: "Case 5: Existing host config, should append to it", + commandExecs: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, + }, + WorkingDir: workDir, + }, + }, + commandName: emptyString, + comp: versionsCommon.DevfileComponent{ + Container: &versionsCommon.Container{ + Command: []string{"some", "command"}, + Args: []string{"some", "args"}, + Env: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: garbageString, + }, + { + Name: defaultCommandEnv, + Value: garbageString, + }, + }, + Name: component, + }, + }, + supervisordVolumeName: supervisordVolumeName, + hostConfig: container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: garbageString, + Target: garbageString, + }, + }, + }, + wantHostConfig: container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: supervisordVolumeName, + Target: adaptersCommon.SupervisordMountPath, + }, + { + Type: 
mount.TypeVolume, + Source: garbageString, + Target: garbageString, + }, + }, + }, + wantCommand: []string{"some", "command"}, + wantArgs: []string{"some", "args"}, + wantEnv: []versionsCommon.Env{ + { + Name: defaultWorkDirEnv, + Value: garbageString, + }, + { + Name: defaultCommandEnv, + Value: garbageString, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + ExecCommands: tt.commandExecs, + Components: []versionsCommon.DevfileComponent{ + { + Container: &versionsCommon.Container{ + Name: tt.comp.Container.Name, + }, + }, + }, + }, + } + + runCommand, err := adaptersCommon.GetRunCommand(devObj.Data, tt.commandName) + if err != nil { + t.Errorf("TestUpdateComponentWithSupervisord: error getting the run command: %v", err) + } + + updateComponentWithSupervisord(&tt.comp, runCommand, tt.supervisordVolumeName, &tt.hostConfig) + + // Check the container host config + for _, containerHostConfigMount := range tt.hostConfig.Mounts { + matched := false + for _, wantHostConfigMount := range tt.wantHostConfig.Mounts { + if reflect.DeepEqual(wantHostConfigMount, containerHostConfigMount) { + matched = true + } + } + + if !matched { + t.Errorf("TestUpdateComponentWithSupervisord: host configs source: %v target:%v do not match wanted host config", containerHostConfigMount.Source, containerHostConfigMount.Target) + } + } + + // Check the component command + if !reflect.DeepEqual(tt.comp.Container.Command, tt.wantCommand) { + t.Errorf("TestUpdateComponentWithSupervisord: component commands dont match actual: %v wanted: %v", tt.comp.Container.Command, tt.wantCommand) + } + + // Check the component args + if !reflect.DeepEqual(tt.comp.Container.Args, tt.wantArgs) { + t.Errorf("TestUpdateComponentWithSupervisord: component args dont match actual: %v wanted: %v", tt.comp.Container.Args, tt.wantArgs) + } + + // Check the component env + for _, compEnv := range 
tt.comp.Container.Env { + matched := false + for _, wantEnv := range tt.wantEnv { + if reflect.DeepEqual(wantEnv, compEnv) { + matched = true + } + } + + if !matched { + t.Errorf("TestUpdateComponentWithSupervisord: component env dont match env: %v:%v not present in wanted list", compEnv.Name, compEnv.Value) + } + } + + }) + } + +} diff --git a/pkg/devfile/adapters/docker/storage/adapter_test.go b/pkg/devfile/adapters/docker/storage/adapter_test.go index b6b4a8a3a9b..fcff2af1ab9 100644 --- a/pkg/devfile/adapters/docker/storage/adapter_test.go +++ b/pkg/devfile/adapters/docker/storage/adapter_test.go @@ -38,15 +38,15 @@ func TestCreate(t *testing.T) { { Name: "vol1", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, { Name: "vol2", Volume: common.DevfileVolume{ - Name: &volNames[1], - Size: &volSize, + Name: volNames[1], + Size: volSize, }, }, }, @@ -59,15 +59,15 @@ func TestCreate(t *testing.T) { { Name: "vol1", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, { Name: "vol2", Volume: common.DevfileVolume{ - Name: &volNames[1], - Size: &volSize, + Name: volNames[1], + Size: volSize, }, }, }, @@ -79,7 +79,7 @@ func TestCreate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: versionsCommon.DevfileComponentTypeDockerimage, + Components: []versionsCommon.DevfileComponent{}, }, } diff --git a/pkg/devfile/adapters/docker/storage/utils.go b/pkg/devfile/adapters/docker/storage/utils.go index 60bf74d5c8a..4adb21ac73e 100644 --- a/pkg/devfile/adapters/docker/storage/utils.go +++ b/pkg/devfile/adapters/docker/storage/utils.go @@ -18,7 +18,7 @@ const volNameMaxLength = 45 func CreateComponentStorage(Client *lclient.Client, storages []common.Storage, componentName string) (err error) { for _, storage := range storages { - volumeName := *storage.Volume.Name + 
volumeName := storage.Volume.Name dockerVolName := storage.Name existingDockerVolName, err := GetExistingVolume(Client, volumeName, componentName) @@ -27,7 +27,7 @@ func CreateComponentStorage(Client *lclient.Client, storages []common.Storage, c } if len(existingDockerVolName) == 0 { - klog.V(3).Infof("Creating a Docker volume for %v", volumeName) + klog.V(4).Infof("Creating a Docker volume for %v", volumeName) _, err := Create(Client, volumeName, componentName, dockerVolName) if err != nil { return errors.Wrapf(err, "Error creating Docker volume for "+volumeName) @@ -46,7 +46,7 @@ func Create(Client *lclient.Client, name, componentName, dockerVolName string) ( "storage-name": name, } - klog.V(3).Infof("Creating a Docker volume with name %v and labels %v", dockerVolName, labels) + klog.V(4).Infof("Creating a Docker volume with name %v and labels %v", dockerVolName, labels) vol, err := Client.CreateVolume(dockerVolName, labels) if err != nil { return nil, errors.Wrap(err, "unable to create Docker volume") @@ -81,14 +81,14 @@ func GetExistingVolume(Client *lclient.Client, volumeName, componentName string) "storage-name": volumeName, } - klog.V(3).Infof("Checking Docker volume for volume %v and labels %v\n", volumeName, volumeLabels) + klog.V(4).Infof("Checking Docker volume for volume %v and labels %v\n", volumeName, volumeLabels) vols, err := Client.GetVolumesByLabel(volumeLabels) if err != nil { return "", errors.Wrapf(err, "Unable to get Docker volume with selectors %v", volumeLabels) } if len(vols) == 1 { - klog.V(3).Infof("Found an existing Docker volume for volume %v and labels %v\n", volumeName, volumeLabels) + klog.V(4).Infof("Found an existing Docker volume for volume %v and labels %v\n", volumeName, volumeLabels) existingVolume := vols[0] return existingVolume.Name, nil } else if len(vols) == 0 { @@ -109,23 +109,23 @@ func ProcessVolumes(client *lclient.Client, componentName string, componentAlias // Get a list of all the unique volume names and generate 
their Docker volume names for _, volumes := range componentAliasToVolumes { for _, vol := range volumes { - if _, ok := processedVolumes[*vol.Name]; !ok { - processedVolumes[*vol.Name] = true + if _, ok := processedVolumes[vol.Name]; !ok { + processedVolumes[vol.Name] = true // Generate the volume Names - klog.V(3).Infof("Generating Docker volumes name for %v", *vol.Name) - generatedDockerVolName, err := GenerateVolName(*vol.Name, componentName) + klog.V(4).Infof("Generating Docker volumes name for %v", vol.Name) + generatedDockerVolName, err := GenerateVolName(vol.Name, componentName) if err != nil { return nil, nil, err } // Check if we have an existing volume with the labels, overwrite the generated name with the existing name if present - existingVolName, err := GetExistingVolume(client, *vol.Name, componentName) + existingVolName, err := GetExistingVolume(client, vol.Name, componentName) if err != nil { return nil, nil, err } if len(existingVolName) > 0 { - klog.V(3).Infof("Found an existing Docker volume for %v, volume %v will be re-used", *vol.Name, existingVolName) + klog.V(4).Infof("Found an existing Docker volume for %v, volume %v will be re-used", vol.Name, existingVolName) generatedDockerVolName = existingVolName } @@ -134,7 +134,7 @@ func ProcessVolumes(client *lclient.Client, componentName string, componentAlias Volume: vol, } uniqueStorages = append(uniqueStorages, dockerVol) - volumeNameToDockerVolName[*vol.Name] = generatedDockerVolName + volumeNameToDockerVolName[vol.Name] = generatedDockerVolName } } } diff --git a/pkg/devfile/adapters/docker/storage/utils_test.go b/pkg/devfile/adapters/docker/storage/utils_test.go index 6030772914d..114d3a7c731 100644 --- a/pkg/devfile/adapters/docker/storage/utils_test.go +++ b/pkg/devfile/adapters/docker/storage/utils_test.go @@ -28,15 +28,15 @@ func TestCreateComponentStorage(t *testing.T) { { Name: "vol1", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: 
volSize, }, }, { Name: "vol2", Volume: common.DevfileVolume{ - Name: &volNames[1], - Size: &volSize, + Name: volNames[1], + Size: volSize, }, }, }, @@ -49,15 +49,15 @@ func TestCreateComponentStorage(t *testing.T) { { Name: "vol1", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, { Name: "vol2", Volume: common.DevfileVolume{ - Name: &volNames[1], - Size: &volSize, + Name: volNames[1], + Size: volSize, }, }, }, @@ -95,8 +95,8 @@ func TestStorageCreate(t *testing.T) { storage: common.Storage{ Name: "vol1", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, client: fakeClient, @@ -107,8 +107,8 @@ func TestStorageCreate(t *testing.T) { storage: common.Storage{ Name: "vol-name", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, client: fakeErrorClient, @@ -119,7 +119,7 @@ func TestStorageCreate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { // Create one of the test volumes - _, err := Create(tt.client, *tt.storage.Volume.Name, testComponentName, tt.storage.Name) + _, err := Create(tt.client, tt.storage.Volume.Name, testComponentName, tt.storage.Name) if !tt.wantErr == (err != nil) { t.Errorf("Docker volume create unexpected error %v, wantErr %v", err, tt.wantErr) } @@ -156,9 +156,9 @@ func TestProcessVolumes(t *testing.T) { aliasVolumeMapping: map[string][]common.DevfileVolume{ "some-component": []common.DevfileVolume{ { - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, }, }, @@ -166,9 +166,9 @@ func TestProcessVolumes(t *testing.T) { wantStorage: []common.Storage{ { Volume: common.DevfileVolume{ - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], 
}, }, }, @@ -179,19 +179,19 @@ func TestProcessVolumes(t *testing.T) { aliasVolumeMapping: map[string][]common.DevfileVolume{ "some-component": []common.DevfileVolume{ { - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, { - Name: &volumeNames[1], - ContainerPath: &volumePaths[1], - Size: &volumeSizes[1], + Name: volumeNames[1], + ContainerPath: volumePaths[1], + Size: volumeSizes[1], }, { - Name: &volumeNames[2], - ContainerPath: &volumePaths[2], - Size: &volumeSizes[2], + Name: volumeNames[2], + ContainerPath: volumePaths[2], + Size: volumeSizes[2], }, }, }, @@ -199,23 +199,23 @@ func TestProcessVolumes(t *testing.T) { wantStorage: []common.Storage{ { Volume: common.DevfileVolume{ - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, }, { Volume: common.DevfileVolume{ - Name: &volumeNames[1], - ContainerPath: &volumePaths[1], - Size: &volumeSizes[1], + Name: volumeNames[1], + ContainerPath: volumePaths[1], + Size: volumeSizes[1], }, }, { Volume: common.DevfileVolume{ - Name: &volumeNames[2], - ContainerPath: &volumePaths[2], - Size: &volumeSizes[2], + Name: volumeNames[2], + ContainerPath: volumePaths[2], + Size: volumeSizes[2], }, }, }, @@ -226,33 +226,33 @@ func TestProcessVolumes(t *testing.T) { aliasVolumeMapping: map[string][]common.DevfileVolume{ "some-component": []common.DevfileVolume{ { - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, { - Name: &volumeNames[1], - ContainerPath: &volumePaths[1], - Size: &volumeSizes[1], + Name: volumeNames[1], + ContainerPath: volumePaths[1], + Size: volumeSizes[1], }, }, "second-component": []common.DevfileVolume{ { - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - 
Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, }, "third-component": []common.DevfileVolume{ { - Name: &volumeNames[1], - ContainerPath: &volumePaths[1], - Size: &volumeSizes[1], + Name: volumeNames[1], + ContainerPath: volumePaths[1], + Size: volumeSizes[1], }, { - Name: &volumeNames[2], - ContainerPath: &volumePaths[2], - Size: &volumeSizes[2], + Name: volumeNames[2], + ContainerPath: volumePaths[2], + Size: volumeSizes[2], }, }, }, @@ -260,23 +260,23 @@ func TestProcessVolumes(t *testing.T) { wantStorage: []common.Storage{ { Volume: common.DevfileVolume{ - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, }, { Volume: common.DevfileVolume{ - Name: &volumeNames[1], - ContainerPath: &volumePaths[1], - Size: &volumeSizes[1], + Name: volumeNames[1], + ContainerPath: volumePaths[1], + Size: volumeSizes[1], }, }, { Volume: common.DevfileVolume{ - Name: &volumeNames[2], - ContainerPath: &volumePaths[2], - Size: &volumeSizes[2], + Name: volumeNames[2], + ContainerPath: volumePaths[2], + Size: volumeSizes[2], }, }, }, @@ -287,9 +287,9 @@ func TestProcessVolumes(t *testing.T) { aliasVolumeMapping: map[string][]common.DevfileVolume{ "some-component": []common.DevfileVolume{ { - Name: &volumeNames[0], - ContainerPath: &volumePaths[0], - Size: &volumeSizes[0], + Name: volumeNames[0], + ContainerPath: volumePaths[0], + Size: volumeSizes[0], }, }, }, @@ -317,7 +317,7 @@ func TestProcessVolumes(t *testing.T) { for i := range uniqueStorage { var volExists bool for j := range tt.wantStorage { - if *uniqueStorage[i].Volume.Name == *tt.wantStorage[j].Volume.Name && uniqueStorage[i].Volume.ContainerPath == tt.wantStorage[j].Volume.ContainerPath { + if uniqueStorage[i].Volume.Name == tt.wantStorage[j].Volume.Name && uniqueStorage[i].Volume.ContainerPath == tt.wantStorage[j].Volume.ContainerPath { volExists = 
true } } diff --git a/pkg/devfile/adapters/docker/utils/utils.go b/pkg/devfile/adapters/docker/utils/utils.go index 6824e56a188..eaec4f21d19 100644 --- a/pkg/devfile/adapters/docker/utils/utils.go +++ b/pkg/devfile/adapters/docker/utils/utils.go @@ -11,31 +11,44 @@ import ( "github.com/docker/docker/api/types/mount" adaptersCommon "github.com/openshift/odo/pkg/devfile/adapters/common" - "github.com/openshift/odo/pkg/devfile/adapters/docker/storage" + "github.com/openshift/odo/pkg/devfile/parser/data" "github.com/openshift/odo/pkg/devfile/parser/data/common" "github.com/openshift/odo/pkg/lclient" - "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/util" "github.com/pkg/errors" - "k8s.io/klog" ) const ( - supervisordVolume = "supervisord" - projectsVolume = "projects" - volume = "vol" + // SupervisordVolume is supervisord volume type + SupervisordVolume = "supervisord" + + // ProjectsVolume is project source volume type + ProjectsVolume = "projects" ) -// ComponentExists checks if Docker containers labeled with the specified component name exists -func ComponentExists(client lclient.Client, name string) bool { +// ComponentExists checks if a component exist +// returns true, if the number of containers equals the number of unique devfile components +// returns false, if number of containers is zero +// returns an error, if number of containers is more than zero but does not equal the number of unique devfile components +func ComponentExists(client lclient.Client, data data.DevfileData, name string) (bool, error) { containers, err := GetComponentContainers(client, name) if err != nil { - // log the error since this function basically returns a bool - log.Error(err) - return false + return false, errors.Wrapf(err, "unable to get the containers for component %s", name) + } + + supportedComponents := adaptersCommon.GetSupportedComponents(data) + + var componentExists bool + if len(containers) == 0 { + componentExists = false + } else if len(containers) == 
len(supportedComponents) { + componentExists = true + } else if len(containers) > 0 && len(containers) != len(supportedComponents) { + return false, errors.New(fmt.Sprintf("component %s is in an invalid state, please execute odo delete and retry odo push", name)) } - return len(containers) != 0 + + return componentExists, nil } // GetComponentContainers returns a list of the running component containers @@ -61,20 +74,20 @@ func GetContainerIDForAlias(containers []types.Container, alias string) string { } // ConvertEnvs converts environment variables from the devfile structure to an array of strings, as expected by Docker -func ConvertEnvs(vars []common.DockerimageEnv) []string { +func ConvertEnvs(vars []common.Env) []string { dockerVars := []string{} for _, env := range vars { - envString := fmt.Sprintf("%s=%s", *env.Name, *env.Value) + envString := fmt.Sprintf("%s=%s", env.Name, env.Value) dockerVars = append(dockerVars, envString) } return dockerVars } // ConvertPorts converts endpoints from the devfile structure to PortSet, which is expected by Docker -func ConvertPorts(endpoints []common.DockerimageEndpoint) nat.PortSet { +func ConvertPorts(endpoints []common.Endpoint) nat.PortSet { portSet := nat.PortSet{} for _, endpoint := range endpoints { - port := nat.Port(strconv.Itoa(int(*endpoint.Port)) + "/tcp") + port := nat.Port(strconv.Itoa(int(endpoint.TargetPort)) + "/tcp") portSet[port] = struct{}{} } return portSet @@ -87,7 +100,7 @@ func ConvertPorts(endpoints []common.DockerimageEndpoint) nat.PortSet { // so this function is necessary to prevent having to restart the container on every odo pushs func DoesContainerNeedUpdating(component common.DevfileComponent, containerConfig *container.Config, hostConfig *container.HostConfig, devfileMounts []mount.Mount, containerMounts []types.MountPoint, portMap nat.PortMap) bool { // If the image was changed in the devfile, the container needs to be updated - if *component.Image != containerConfig.Image { + if 
component.Container.Image != containerConfig.Image { return true } @@ -100,14 +113,14 @@ func DoesContainerNeedUpdating(component common.DevfileComponent, containerConfi // Update the container if the env vars were updated in the devfile // Need to convert the devfile envvars to the format expected by Docker - devfileEnvVars := ConvertEnvs(component.Env) + devfileEnvVars := ConvertEnvs(component.Container.Env) for _, envVar := range devfileEnvVars { if !containerHasEnvVar(envVar, containerConfig.Env) { return true } } - devfilePorts := ConvertPorts(component.Endpoints) + devfilePorts := ConvertPorts(component.Container.Endpoints) for port := range devfilePorts { if !containerHasPort(port, containerConfig.ExposedPorts) { return true @@ -147,7 +160,7 @@ func AddVolumeToContainer(volumeName, volumeMount string, hostConfig *container. func GetProjectVolumeLabels(componentName string) map[string]string { volumeLabels := map[string]string{ "component": componentName, - "type": projectsVolume, + "type": ProjectsVolume, } return volumeLabels } @@ -162,14 +175,15 @@ func GetContainerLabels(componentName, alias string) map[string]string { } // GetSupervisordVolumeLabels returns the label selectors used to retrieve the supervisord volume -func GetSupervisordVolumeLabels() map[string]string { +func GetSupervisordVolumeLabels(componentName string) map[string]string { image := adaptersCommon.GetBootstrapperImage() - _, _, _, imageTag := util.ParseComponentImageName(image) + _, imageWithoutTag, _, imageTag := util.ParseComponentImageName(image) supervisordLabels := map[string]string{ - "name": adaptersCommon.SupervisordVolumeName, - "type": supervisordVolume, - "version": imageTag, + "component": componentName, + "type": SupervisordVolume, + "image": imageWithoutTag, + "version": imageTag, } return supervisordLabels } @@ -202,115 +216,3 @@ func containerHasPort(devfilePort nat.Port, exposedPorts nat.PortSet) bool { } return false } - -// UpdateComponentWithSupervisord updates the 
devfile component's -// 1. command and args with supervisord, if absent -// 2. env with ODO_COMMAND_RUN and ODO_COMMAND_RUN_WORKING_DIR, if absent -func UpdateComponentWithSupervisord(comp *common.DevfileComponent, runCommand common.DevfileCommand, supervisordVolumeName string, hostConfig *container.HostConfig) { - - // Mount the supervisord volume for the run command container - for _, action := range runCommand.Actions { - if *action.Component == *comp.Alias { - AddVolumeToContainer(supervisordVolumeName, adaptersCommon.SupervisordMountPath, hostConfig) - - if len(comp.Command) == 0 && len(comp.Args) == 0 { - klog.V(4).Infof("Updating container %v entrypoint with supervisord", *comp.Alias) - comp.Command = append(comp.Command, adaptersCommon.SupervisordBinaryPath) - comp.Args = append(comp.Args, "-c", adaptersCommon.SupervisordConfFile) - } - - if !adaptersCommon.IsEnvPresent(comp.Env, adaptersCommon.EnvOdoCommandRun) { - envName := adaptersCommon.EnvOdoCommandRun - envValue := *action.Command - comp.Env = append(comp.Env, common.DockerimageEnv{ - Name: &envName, - Value: &envValue, - }) - } - - if !adaptersCommon.IsEnvPresent(comp.Env, adaptersCommon.EnvOdoCommandRunWorkingDir) && action.Workdir != nil { - envName := adaptersCommon.EnvOdoCommandRunWorkingDir - envValue := *action.Workdir - comp.Env = append(comp.Env, common.DockerimageEnv{ - Name: &envName, - Value: &envValue, - }) - } - } - } -} - -// CreateAndInitSupervisordVolume creates the supervisord volume and initializes -// it with supervisord bootstrap image - assembly files and supervisord binary -func CreateAndInitSupervisordVolume(client lclient.Client) (string, error) { - log.Info("\nInitialization") - s := log.Spinner("Initializing the component") - defer s.End(false) - - supervisordVolumeName, err := storage.GenerateVolName(adaptersCommon.SupervisordVolumeName, volume) - if err != nil { - return "", errors.Wrapf(err, "unable to generate volume name for supervisord") - } - - supervisordLabels := 
GetSupervisordVolumeLabels() - _, err = client.CreateVolume(supervisordVolumeName, supervisordLabels) - if err != nil { - return "", errors.Wrapf(err, "Unable to create supervisord volume for component") - } - - err = StartBootstrapSupervisordInitContainer(client, supervisordVolumeName) - if err != nil { - return "", errors.Wrapf(err, "Unable to start supervisord container for component") - } - s.End(true) - - return supervisordVolumeName, nil -} - -// StartBootstrapSupervisordInitContainer pulls the supervisord bootstrap image, mounts the supervisord -// volume, starts the bootstrap container and initializes the supervisord volume via its entrypoint -func StartBootstrapSupervisordInitContainer(client lclient.Client, supervisordVolumeName string) error { - supervisordLabels := GetSupervisordVolumeLabels() - - image := adaptersCommon.GetBootstrapperImage() - command := []string{"/usr/bin/cp"} - args := []string{ - "-r", - adaptersCommon.OdoInitImageContents, - adaptersCommon.SupervisordMountPath, - } - - var s *log.Status - if log.IsDebug() { - s = log.Spinnerf("Pulling image %s", image) - defer s.End(false) - } - - err := client.PullImage(image) - if err != nil { - return errors.Wrapf(err, "unable to pull %s image", image) - } - if log.IsDebug() { - s.End(true) - } - - containerConfig := client.GenerateContainerConfig(image, command, args, nil, supervisordLabels, nil) - hostConfig := container.HostConfig{} - - AddVolumeToContainer(supervisordVolumeName, adaptersCommon.SupervisordMountPath, &hostConfig) - - // Create the docker container - if log.IsDebug() { - s = log.Spinnerf("Starting container for %s", image) - defer s.End(false) - } - err = client.StartContainer(&containerConfig, &hostConfig, nil) - if err != nil { - return err - } - if log.IsDebug() { - s.End(true) - } - - return nil -} diff --git a/pkg/devfile/adapters/docker/utils/utils_test.go b/pkg/devfile/adapters/docker/utils/utils_test.go index 3bfe2d61c53..1def819cdf2 100644 --- 
a/pkg/devfile/adapters/docker/utils/utils_test.go +++ b/pkg/devfile/adapters/docker/utils/utils_test.go @@ -26,32 +26,74 @@ func TestComponentExists(t *testing.T) { name string componentName string client *lclient.Client + components []common.DevfileComponent want bool + wantErr bool }{ { name: "Case 1: Component exists", componentName: "golang", client: fakeClient, - want: true, + components: []common.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + testingutil.GetFakeComponent("alias2"), + }, + want: true, + wantErr: false, }, { name: "Case 2: Component doesn't exist", componentName: "fakecomponent", client: fakeClient, - want: false, + components: []common.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + }, + want: false, + wantErr: false, }, { name: "Case 3: Error with docker client", componentName: "golang", client: fakeErrorClient, + components: []common.DevfileComponent{ + testingutil.GetFakeComponent("alias1"), + }, + want: false, + wantErr: true, + }, + { + name: "Case 4: Container and devfile component mismatch", + componentName: "test", + client: fakeClient, + components: []common.DevfileComponent{}, want: false, + wantErr: true, + }, + { + name: "Case 5: Devfile does not have supported components", + componentName: "golang", + client: fakeClient, + components: []common.DevfileComponent{ + { + Kubernetes: &common.Kubernetes{}, + }, + }, + want: false, + wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - cmpExists := ComponentExists(*tt.client, tt.componentName) - if tt.want != cmpExists { + devObj := devfileParser.DevfileObj{ + Data: testingutil.TestDevfileData{ + Components: tt.components, + }, + } + cmpExists, err := ComponentExists(*tt.client, devObj.Data, tt.componentName) + if !tt.wantErr && err != nil { + t.Errorf("TestComponentExists error, unexpected error - %v", err) + } else if !tt.wantErr && tt.want != cmpExists { t.Errorf("expected %v, wanted %v", cmpExists, tt.want) } }) @@ -130,40 
+172,40 @@ func TestConvertEnvs(t *testing.T) { envVarsValues := []string{"value1", "value2", "value3"} tests := []struct { name string - envVars []common.DockerimageEnv + envVars []common.Env want []string }{ { name: "Case 1: One env var", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, }, want: []string{"test=value1"}, }, { name: "Case 2: Multiple env vars", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, { - Name: &envVarsNames[1], - Value: &envVarsValues[1], + Name: envVarsNames[1], + Value: envVarsValues[1], }, { - Name: &envVarsNames[2], - Value: &envVarsValues[2], + Name: envVarsNames[2], + Value: envVarsValues[2], }, }, want: []string{"test=value1", "sample-var=value2", "myvar=value3"}, }, { name: "Case 3: No env vars", - envVars: []common.DockerimageEnv{}, + envVars: []common.Env{}, want: []string{}, }, } @@ -187,7 +229,7 @@ func TestDoesContainerNeedUpdating(t *testing.T) { tests := []struct { name string - envVars []common.DockerimageEnv + envVars []common.Env mounts []mount.Mount image string containerConfig container.Config @@ -198,14 +240,14 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }{ { name: "Case 1: No changes", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, { - Name: &envVarsNames[1], - Value: &envVarsValues[1], + Name: envVarsNames[1], + Value: envVarsValues[1], }, }, mounts: []mount.Mount{ @@ -229,10 +271,10 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }, { name: "Case 2: Update required, env var changed", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[2], - Value: &envVarsValues[2], + Name: envVarsNames[2], + Value: 
envVarsValues[2], }, }, image: "golang", @@ -244,10 +286,10 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }, { name: "Case 3: Update required, image changed", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[2], - Value: &envVarsValues[2], + Name: envVarsNames[2], + Value: envVarsValues[2], }, }, image: "node", @@ -259,14 +301,14 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }, { name: "Case 4: Update required, volumes changed", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, { - Name: &envVarsNames[1], - Value: &envVarsValues[1], + Name: envVarsNames[1], + Value: envVarsValues[1], }, }, mounts: []mount.Mount{ @@ -294,14 +336,14 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }, { name: "Case 5: Update required, port changed", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, { - Name: &envVarsNames[1], - Value: &envVarsValues[1], + Name: envVarsNames[1], + Value: envVarsValues[1], }, }, mounts: []mount.Mount{ @@ -332,14 +374,14 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }, { name: "Case 6: Update required, exposed port changed", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, { - Name: &envVarsNames[1], - Value: &envVarsValues[1], + Name: envVarsNames[1], + Value: envVarsValues[1], }, }, mounts: []mount.Mount{ @@ -388,14 +430,14 @@ func TestDoesContainerNeedUpdating(t *testing.T) { }, { name: "Case 7: Update not required, exposed port unchanged", - envVars: []common.DockerimageEnv{ + envVars: []common.Env{ { - Name: &envVarsNames[0], - Value: &envVarsValues[0], + Name: envVarsNames[0], + Value: envVarsValues[0], }, { - Name: 
&envVarsNames[1], - Value: &envVarsValues[1], + Name: envVarsNames[1], + Value: envVarsValues[1], }, }, mounts: []mount.Mount{ @@ -447,8 +489,8 @@ func TestDoesContainerNeedUpdating(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { component := common.DevfileComponent{ - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Image: &tt.image, + Container: &common.Container{ + Image: tt.image, Env: tt.envVars, }, } @@ -566,7 +608,7 @@ func TestGetProjectVolumeLabels(t *testing.T) { componentName: "some-component", want: map[string]string{ "component": "some-component", - "type": "projects", + "type": ProjectsVolume, }, }, { @@ -574,7 +616,7 @@ func TestGetProjectVolumeLabels(t *testing.T) { componentName: "", want: map[string]string{ "component": "", - "type": "projects", + "type": ProjectsVolume, }, }, } @@ -628,25 +670,30 @@ func TestGetContainerLabels(t *testing.T) { func TestGetSupervisordVolumeLabels(t *testing.T) { + componentNameArr := []string{"myComponent1", "myComponent2"} + tests := []struct { - name string - customImage bool - want map[string]string + name string + componentName string + customImage bool + want map[string]string }{ { - name: "Case 1: Default supervisord image", - customImage: false, + name: "Case 1: Default supervisord image", + componentName: componentNameArr[0], + customImage: false, want: map[string]string{ - "name": adaptersCommon.SupervisordVolumeName, - "type": supervisordVolume, + "component": componentNameArr[0], + "type": SupervisordVolume, }, }, { - name: "Case 2: Custom supervisord image", - customImage: true, + name: "Case 2: Custom supervisord image", + componentName: componentNameArr[1], + customImage: true, want: map[string]string{ - "name": adaptersCommon.SupervisordVolumeName, - "type": supervisordVolume, + "component": componentNameArr[1], + "type": SupervisordVolume, }, }, } @@ -656,11 +703,12 @@ func TestGetSupervisordVolumeLabels(t *testing.T) { 
os.Setenv("ODO_BOOTSTRAPPER_IMAGE", "customimage:customtag") } image := adaptersCommon.GetBootstrapperImage() - _, _, _, imageTag := util.ParseComponentImageName(image) + _, imageWithoutTag, _, imageTag := util.ParseComponentImageName(image) tt.want["version"] = imageTag + tt.want["image"] = imageWithoutTag - labels := GetSupervisordVolumeLabels() + labels := GetSupervisordVolumeLabels(tt.componentName) if !reflect.DeepEqual(tt.want, labels) { t.Errorf("expected %v, actual %v", tt.want, labels) } @@ -669,432 +717,6 @@ func TestGetSupervisordVolumeLabels(t *testing.T) { } -func TestUpdateComponentWithSupervisord(t *testing.T) { - - command := "ls -la" - component := "alias1" - workDir := "/" - emptyString := "" - garbageString := "garbageString" - validCommandType := common.DevfileCommandTypeExec - supervisordVolumeName := "supervisordVolumeName" - defaultWorkDirEnv := adaptersCommon.EnvOdoCommandRunWorkingDir - defaultCommandEnv := adaptersCommon.EnvOdoCommandRun - - tests := []struct { - name string - commandActions []common.DevfileCommandAction - commandName string - comp common.DevfileComponent - supervisordVolumeName string - hostConfig container.HostConfig - wantHostConfig container.HostConfig - wantCommand []string - wantArgs []string - wantEnv []common.DockerimageEnv - }{ - { - name: "Case 1: No component commands, args, env", - commandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, - commandName: emptyString, - comp: common.DevfileComponent{ - Alias: &component, - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Command: []string{}, - Args: []string{}, - Env: []common.DockerimageEnv{}, - }, - }, - supervisordVolumeName: supervisordVolumeName, - hostConfig: container.HostConfig{}, - wantHostConfig: container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: supervisordVolumeName, - Target: 
adaptersCommon.SupervisordMountPath, - }, - }, - }, - wantCommand: []string{adaptersCommon.SupervisordBinaryPath}, - wantArgs: []string{"-c", adaptersCommon.SupervisordConfFile}, - wantEnv: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: &workDir, - }, - { - Name: &defaultCommandEnv, - Value: &command, - }, - }, - }, - { - name: "Case 2: Existing component command and no args, env", - commandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, - commandName: emptyString, - comp: common.DevfileComponent{ - Alias: &component, - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Command: []string{"some", "command"}, - Args: []string{}, - Env: []common.DockerimageEnv{}, - }, - }, - supervisordVolumeName: supervisordVolumeName, - hostConfig: container.HostConfig{}, - wantHostConfig: container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: supervisordVolumeName, - Target: adaptersCommon.SupervisordMountPath, - }, - }, - }, - wantCommand: []string{"some", "command"}, - wantArgs: []string{}, - wantEnv: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: &workDir, - }, - { - Name: &defaultCommandEnv, - Value: &command, - }, - }, - }, - { - name: "Case 3: Existing component command and args and no env", - commandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, - commandName: emptyString, - comp: common.DevfileComponent{ - Alias: &component, - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Command: []string{"some", "command"}, - Args: []string{"some", "args"}, - Env: []common.DockerimageEnv{}, - }, - }, - supervisordVolumeName: supervisordVolumeName, - hostConfig: container.HostConfig{}, - wantHostConfig: container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - 
Source: supervisordVolumeName, - Target: adaptersCommon.SupervisordMountPath, - }, - }, - }, - wantCommand: []string{"some", "command"}, - wantArgs: []string{"some", "args"}, - wantEnv: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: &workDir, - }, - { - Name: &defaultCommandEnv, - Value: &command, - }, - }, - }, - { - name: "Case 4: Existing component command, args and env", - commandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, - commandName: emptyString, - comp: common.DevfileComponent{ - Alias: &component, - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Command: []string{"some", "command"}, - Args: []string{"some", "args"}, - Env: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: &garbageString, - }, - { - Name: &defaultCommandEnv, - Value: &garbageString, - }, - }, - }, - }, - supervisordVolumeName: supervisordVolumeName, - hostConfig: container.HostConfig{}, - wantHostConfig: container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: supervisordVolumeName, - Target: adaptersCommon.SupervisordMountPath, - }, - }, - }, - wantCommand: []string{"some", "command"}, - wantArgs: []string{"some", "args"}, - wantEnv: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: &garbageString, - }, - { - Name: &defaultCommandEnv, - Value: &garbageString, - }, - }, - }, - { - name: "Case 5: Existing host config, should append to it", - commandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, - commandName: emptyString, - comp: common.DevfileComponent{ - Alias: &component, - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Command: []string{"some", "command"}, - Args: []string{"some", "args"}, - Env: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: 
&garbageString, - }, - { - Name: &defaultCommandEnv, - Value: &garbageString, - }, - }, - }, - }, - supervisordVolumeName: supervisordVolumeName, - hostConfig: container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: garbageString, - Target: garbageString, - }, - }, - }, - wantHostConfig: container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: supervisordVolumeName, - Target: adaptersCommon.SupervisordMountPath, - }, - { - Type: mount.TypeVolume, - Source: garbageString, - Target: garbageString, - }, - }, - }, - wantCommand: []string{"some", "command"}, - wantArgs: []string{"some", "args"}, - wantEnv: []common.DockerimageEnv{ - { - Name: &defaultWorkDirEnv, - Value: &garbageString, - }, - { - Name: &defaultCommandEnv, - Value: &garbageString, - }, - }, - }, - { - name: "Case 6: Not a run command component", - commandActions: []common.DevfileCommandAction{ - { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, - }, - }, - commandName: emptyString, - comp: common.DevfileComponent{ - Alias: &garbageString, - DevfileComponentDockerimage: common.DevfileComponentDockerimage{ - Command: []string{}, - Args: []string{}, - Env: []common.DockerimageEnv{}, - }, - }, - supervisordVolumeName: supervisordVolumeName, - hostConfig: container.HostConfig{}, - wantHostConfig: container.HostConfig{ - Mounts: []mount.Mount{}, - }, - wantCommand: []string{}, - wantArgs: []string{}, - wantEnv: []common.DockerimageEnv{}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - devObj := devfileParser.DevfileObj{ - Data: testingutil.TestDevfileData{ - CommandActions: tt.commandActions, - ComponentType: common.DevfileComponentTypeDockerimage, - }, - } - - runCommand, err := adaptersCommon.GetRunCommand(devObj.Data, tt.commandName) - if err != nil { - t.Errorf("TestUpdateComponentWithSupervisord: error getting the run command") - } - - 
UpdateComponentWithSupervisord(&tt.comp, runCommand, tt.supervisordVolumeName, &tt.hostConfig) - - // Check the container host config - for _, containerHostConfigMount := range tt.hostConfig.Mounts { - matched := false - for _, wantHostConfigMount := range tt.wantHostConfig.Mounts { - if reflect.DeepEqual(wantHostConfigMount, containerHostConfigMount) { - matched = true - } - } - - if !matched { - t.Errorf("TestUpdateComponentWithSupervisord: host configs source: %v target:%v do not match wanted host config", containerHostConfigMount.Source, containerHostConfigMount.Target) - } - } - - // Check the component command - if !reflect.DeepEqual(tt.comp.Command, tt.wantCommand) { - t.Errorf("TestUpdateComponentWithSupervisord: component commands dont match actual: %v wanted: %v", tt.comp.Command, tt.wantCommand) - } - - // Check the component args - if !reflect.DeepEqual(tt.comp.Args, tt.wantArgs) { - t.Errorf("TestUpdateComponentWithSupervisord: component args dont match actual: %v wanted: %v", tt.comp.Args, tt.wantArgs) - } - - // Check the component env - for _, compEnv := range tt.comp.Env { - matched := false - for _, wantEnv := range tt.wantEnv { - if reflect.DeepEqual(wantEnv, compEnv) { - matched = true - } - } - - if !matched { - t.Errorf("TestUpdateComponentWithSupervisord: component env dont match env: %v:%v not present in wanted list", *compEnv.Name, *compEnv.Value) - } - } - - }) - } - -} - -func TestStartBootstrapSupervisordInitContainer(t *testing.T) { - - supervisordVolumeName := supervisordVolume - - fakeClient := lclient.FakeNew() - fakeErrorClient := lclient.FakeErrorNew() - - tests := []struct { - name string - client *lclient.Client - wantErr bool - }{ - { - name: "Case 1: Successfully create a bootstrap container", - client: fakeClient, - wantErr: false, - }, - { - name: "Case 2: Failed to create a bootstrap container ", - client: fakeErrorClient, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := 
StartBootstrapSupervisordInitContainer(*tt.client, supervisordVolumeName) - if !tt.wantErr && err != nil { - t.Errorf("TestStartBootstrapSupervisordInitContainer: unexpected error got: %v wanted: %v", err, tt.wantErr) - } - }) - } - -} - -func TestCreateAndInitSupervisordVolume(t *testing.T) { - - fakeClient := lclient.FakeNew() - fakeErrorClient := lclient.FakeErrorNew() - - tests := []struct { - name string - client *lclient.Client - wantErr bool - }{ - { - name: "Case 1: Successfully create a bootstrap vol and container", - client: fakeClient, - wantErr: false, - }, - { - name: "Case 2: Failed to create a bootstrap vol and container ", - client: fakeErrorClient, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - volName, err := CreateAndInitSupervisordVolume(*tt.client) - if !tt.wantErr && err != nil { - t.Logf("TestCreateAndInitSupervisordVolume: unexpected error %v, wanted %v", err, tt.wantErr) - } else if !tt.wantErr && volName != adaptersCommon.SupervisordVolumeName { - t.Logf("TestCreateAndInitSupervisordVolume: unexpected supervisord vol name, expected: %v got: %v", adaptersCommon.SupervisordVolumeName, volName) - } - }) - } - -} - func TestGetContainerIDForAlias(t *testing.T) { containers := []types.Container{ diff --git a/pkg/devfile/adapters/helper_test.go b/pkg/devfile/adapters/helper_test.go index ac10709ca35..f3d92608601 100644 --- a/pkg/devfile/adapters/helper_test.go +++ b/pkg/devfile/adapters/helper_test.go @@ -23,7 +23,7 @@ func TestNewPlatformAdapter(t *testing.T) { adapterType: "kubernetes.Adapter", name: "get platform adapter", componentName: "test", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, wantErr: false, }, } @@ -31,7 +31,7 @@ func TestNewPlatformAdapter(t *testing.T) { t.Run("get platform adapter", func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: 
tt.componentType, + Components: []versionsCommon.DevfileComponent{}, }, } diff --git a/pkg/devfile/adapters/kubernetes/component/adapter.go b/pkg/devfile/adapters/kubernetes/component/adapter.go index bbede80e76e..19ce18ac277 100644 --- a/pkg/devfile/adapters/kubernetes/component/adapter.go +++ b/pkg/devfile/adapters/kubernetes/component/adapter.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" + "github.com/openshift/odo/pkg/exec" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,18 +19,28 @@ import ( "github.com/openshift/odo/pkg/devfile/adapters/kubernetes/storage" "github.com/openshift/odo/pkg/devfile/adapters/kubernetes/utils" versionsCommon "github.com/openshift/odo/pkg/devfile/parser/data/common" - "github.com/openshift/odo/pkg/exec" "github.com/openshift/odo/pkg/kclient" "github.com/openshift/odo/pkg/log" + "github.com/openshift/odo/pkg/machineoutput" odoutil "github.com/openshift/odo/pkg/odo/util" "github.com/openshift/odo/pkg/sync" ) // New instantiantes a component adapter func New(adapterContext common.AdapterContext, client kclient.Client) Adapter { + + var loggingClient machineoutput.MachineEventLoggingClient + + if log.IsJSON() { + loggingClient = machineoutput.NewConsoleMachineEventLoggingClient() + } else { + loggingClient = machineoutput.NewNoOpMachineEventLoggingClient() + } + return Adapter{ - Client: client, - AdapterContext: adapterContext, + Client: client, + AdapterContext: adapterContext, + machineEventLogger: loggingClient, } } @@ -37,9 +48,12 @@ func New(adapterContext common.AdapterContext, client kclient.Client) Adapter { type Adapter struct { Client kclient.Client common.AdapterContext - devfileInitCmd string - devfileBuildCmd string - devfileRunCmd string + devfileInitCmd string + devfileBuildCmd string + devfileRunCmd string + devfileDebugCmd string + devfileDebugPort int + machineEventLogger machineoutput.MachineEventLoggingClient } // Push updates the component if a matching component exists or creates one 
if it doesn't exist @@ -50,6 +64,8 @@ func (a Adapter) Push(parameters common.PushParameters) (err error) { a.devfileInitCmd = parameters.DevfileInitCmd a.devfileBuildCmd = parameters.DevfileBuildCmd a.devfileRunCmd = parameters.DevfileRunCmd + a.devfileDebugCmd = parameters.DevfileDebugCmd + a.devfileDebugPort = parameters.DebugPort podChanged := false var podName string @@ -74,6 +90,16 @@ func (a Adapter) Push(parameters common.PushParameters) (err error) { s.End(true) log.Infof("\nCreating Kubernetes resources for component %s", a.ComponentName) + + if parameters.Debug { + pushDevfileDebugCommands, err := common.ValidateAndGetDebugDevfileCommands(a.Devfile.Data, a.devfileDebugCmd) + if err != nil { + return fmt.Errorf("debug command is not valid") + } + pushDevfileCommands[versionsCommon.DebugCommandGroupType] = pushDevfileDebugCommands + parameters.ForceBuild = true + } + err = a.createOrUpdateComponent(componentExists) if err != nil { return errors.Wrap(err, "unable to create or update component") @@ -126,7 +152,7 @@ func (a Adapter) Push(parameters common.PushParameters) (err error) { if execRequired { log.Infof("\nExecuting devfile commands for component %s", a.ComponentName) - err = a.execDevfile(pushDevfileCommands, componentExists, parameters.Show, pod.GetName(), pod.Spec.Containers) + err = a.execDevfile(pushDevfileCommands, componentExists, parameters.Show, pod.GetName(), pod.Spec.Containers, parameters.Debug) if err != nil { return err } @@ -156,7 +182,7 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { return fmt.Errorf("No valid components found in the devfile") } - containers, err = utils.UpdateContainersWithSupervisord(a.Devfile, containers, a.devfileRunCmd) + containers, err = utils.UpdateContainersWithSupervisord(a.Devfile, containers, a.devfileRunCmd, a.devfileDebugCmd, a.devfileDebugPort) if err != nil { return err } @@ -175,23 +201,23 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { 
// Get a list of all the unique volume names and generate their PVC names for _, volumes := range componentAliasToVolumes { for _, vol := range volumes { - if _, ok := processedVolumes[*vol.Name]; !ok { - processedVolumes[*vol.Name] = true + if _, ok := processedVolumes[vol.Name]; !ok { + processedVolumes[vol.Name] = true // Generate the PVC Names - klog.V(3).Infof("Generating PVC name for %v", *vol.Name) - generatedPVCName, err := storage.GeneratePVCNameFromDevfileVol(*vol.Name, componentName) + klog.V(4).Infof("Generating PVC name for %v", vol.Name) + generatedPVCName, err := storage.GeneratePVCNameFromDevfileVol(vol.Name, componentName) if err != nil { return err } // Check if we have an existing PVC with the labels, overwrite the generated name with the existing name if present - existingPVCName, err := storage.GetExistingPVC(&a.Client, *vol.Name, componentName) + existingPVCName, err := storage.GetExistingPVC(&a.Client, vol.Name, componentName) if err != nil { return err } if len(existingPVCName) > 0 { - klog.V(3).Infof("Found an existing PVC for %v, PVC %v will be re-used", *vol.Name, existingPVCName) + klog.V(4).Infof("Found an existing PVC for %v, PVC %v will be re-used", vol.Name, existingPVCName) generatedPVCName = existingPVCName } @@ -200,7 +226,7 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { Volume: vol, } uniqueStorages = append(uniqueStorages, pvc) - volumeNameToPVCName[*vol.Name] = generatedPVCName + volumeNameToPVCName[vol.Name] = generatedPVCName } } } @@ -221,17 +247,17 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { } } serviceSpec := kclient.GenerateServiceSpec(objectMeta.Name, containerPorts) - klog.V(3).Infof("Creating deployment %v", deploymentSpec.Template.GetName()) - klog.V(3).Infof("The component name is %v", componentName) + klog.V(4).Infof("Creating deployment %v", deploymentSpec.Template.GetName()) + klog.V(4).Infof("The component name is %v", componentName) if 
utils.ComponentExists(a.Client, componentName) { // If the component already exists, get the resource version of the deploy before updating - klog.V(3).Info("The component already exists, attempting to update it") + klog.V(4).Info("The component already exists, attempting to update it") deployment, err := a.Client.UpdateDeployment(*deploymentSpec) if err != nil { return err } - klog.V(3).Infof("Successfully updated component %v", componentName) + klog.V(4).Infof("Successfully updated component %v", componentName) oldSvc, err := a.Client.KubeClient.CoreV1().Services(a.Client.Namespace).Get(componentName, metav1.GetOptions{}) objectMetaTemp := objectMeta ownerReference := kclient.GenerateOwnerReference(deployment) @@ -243,7 +269,7 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { if err != nil { return err } - klog.V(3).Infof("Successfully created Service for component %s", componentName) + klog.V(4).Infof("Successfully created Service for component %s", componentName) } } else { if len(serviceSpec.Ports) > 0 { @@ -253,7 +279,7 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { if err != nil { return err } - klog.V(3).Infof("Successfully update Service for component %s", componentName) + klog.V(4).Infof("Successfully update Service for component %s", componentName) } else { err = a.Client.KubeClient.CoreV1().Services(a.Client.Namespace).Delete(componentName, &metav1.DeleteOptions{}) if err != nil { @@ -266,7 +292,7 @@ func (a Adapter) createOrUpdateComponent(componentExists bool) (err error) { if err != nil { return err } - klog.V(3).Infof("Successfully created component %v", componentName) + klog.V(4).Infof("Successfully created component %v", componentName) ownerReference := kclient.GenerateOwnerReference(deployment) objectMetaTemp := objectMeta objectMetaTemp.OwnerReferences = append(objectMeta.OwnerReferences, ownerReference) @@ -275,7 +301,7 @@ func (a Adapter) createOrUpdateComponent(componentExists 
bool) (err error) { if err != nil { return err } - klog.V(3).Infof("Successfully created Service for component %s", componentName) + klog.V(4).Infof("Successfully created Service for component %s", componentName) } } @@ -305,81 +331,77 @@ func (a Adapter) waitAndGetComponentPod(hideSpinner bool) (*corev1.Pod, error) { // Executes all the commands from the devfile in order: init and build - which are both optional, and a compulsary run. // Init only runs once when the component is created. -func (a Adapter) execDevfile(pushDevfileCommands []versionsCommon.DevfileCommand, componentExists, show bool, podName string, containers []corev1.Container) (err error) { +func (a Adapter) execDevfile(commandsMap common.PushCommandsMap, componentExists, show bool, podName string, containers []corev1.Container, isDebug bool) (err error) { // If nothing has been passed, then the devfile is missing the required run command - if len(pushDevfileCommands) == 0 { + if len(commandsMap) == 0 { return errors.New(fmt.Sprint("error executing devfile commands - there should be at least 1 command")) } - commandOrder := []common.CommandNames{} + compInfo := common.ComponentInfo{ + PodName: podName, + } - // Only add runinit to the expected commands if the component doesn't already exist - // This would be the case when first running the container + // only execute Init command, if it is first run of container. 
if !componentExists { - commandOrder = append(commandOrder, common.CommandNames{DefaultName: string(common.DefaultDevfileInitCommand), AdapterName: a.devfileInitCmd}) - } - commandOrder = append( - commandOrder, - common.CommandNames{DefaultName: string(common.DefaultDevfileBuildCommand), AdapterName: a.devfileBuildCmd}, - common.CommandNames{DefaultName: string(common.DefaultDevfileRunCommand), AdapterName: a.devfileRunCmd}, - ) - - // Loop through each of the expected commands in the devfile - for i, currentCommand := range commandOrder { - // Loop through each of the command given from the devfile - for _, command := range pushDevfileCommands { - // If the current command from the devfile is the currently expected command from the devfile - if command.Name == currentCommand.DefaultName || command.Name == currentCommand.AdapterName { - // If the current command is not the last command in the slice - // it is not expected to be the run command - if i < len(commandOrder)-1 { - // Any exec command such as "Init" and "Build" - - for _, action := range command.Actions { - compInfo := common.ComponentInfo{ - ContainerName: *action.Component, - PodName: podName, - } - - err = exec.ExecuteDevfileBuildAction(&a.Client, action, command.Name, compInfo, show) - if err != nil { - return err - } - } - - // If the current command is the last command in the slice - // it is expected to be the run command - } else { - // Last command is "Run" - klog.V(4).Infof("Executing devfile command %v", command.Name) - - for _, action := range command.Actions { - - // Check if the devfile run component containers have supervisord as the entrypoint. 
- // Start the supervisord if the odo component does not exist - if !componentExists { - err = a.InitRunContainerSupervisord(*action.Component, podName, containers) - if err != nil { - return - } - } - - compInfo := common.ComponentInfo{ - ContainerName: *action.Component, - PodName: podName, - } - - if componentExists && !common.IsRestartRequired(command) { - klog.V(4).Infof("restart:false, Not restarting DevRun Command") - err = exec.ExecuteDevfileRunActionWithoutRestart(&a.Client, action, command.Name, compInfo, show) - return - } - - err = exec.ExecuteDevfileRunAction(&a.Client, action, command.Name, compInfo, show) - } - } + + // Get Init Command + command, ok := commandsMap[versionsCommon.InitCommandGroupType] + if ok { + compInfo.ContainerName = command.Exec.Component + err = exec.ExecuteDevfileBuildAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + if err != nil { + return err } + } + + } + + // Get Build Command + command, ok := commandsMap[versionsCommon.BuildCommandGroupType] + if ok { + compInfo.ContainerName = command.Exec.Component + err = exec.ExecuteDevfileBuildAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + if err != nil { + return err + } + } + + // Get Run or Debug Command + if isDebug { + command, ok = commandsMap[versionsCommon.DebugCommandGroupType] + } else { + command, ok = commandsMap[versionsCommon.RunCommandGroupType] + } + if ok { + klog.V(4).Infof("Executing devfile command %v", command.Exec.Id) + compInfo.ContainerName = command.Exec.Component + + // Check if the devfile debug component containers have supervisord as the entrypoint. 
+ // Start the supervisord if the odo component does not exist + if !componentExists { + err = a.InitRunContainerSupervisord(command.Exec.Component, podName, containers) + if err != nil { + a.machineEventLogger.ReportError(err, machineoutput.TimestampNow()) + return + } + } + + if componentExists && !common.IsRestartRequired(command) { + klog.V(4).Infof("restart:false, Not restarting %v Command", command.Exec.Id) + if isDebug { + err = exec.ExecuteDevfileDebugActionWithoutRestart(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + } else { + err = exec.ExecuteDevfileRunActionWithoutRestart(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + } + return + } + if isDebug { + err = exec.ExecuteDevfileDebugAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + } else { + err = exec.ExecuteDevfileRunAction(&a.Client, *command.Exec, command.Exec.Id, compInfo, show, a.machineEventLogger) + } + } return @@ -395,7 +417,7 @@ func (a Adapter) InitRunContainerSupervisord(containerName, podName string, cont ContainerName: containerName, PodName: podName, } - err = exec.ExecuteCommand(&a.Client, compInfo, command, true) + err = exec.ExecuteCommand(&a.Client, compInfo, command, true, nil, nil) } } diff --git a/pkg/devfile/adapters/kubernetes/component/adapter_test.go b/pkg/devfile/adapters/kubernetes/component/adapter_test.go index a683a5298d5..f5755c13e65 100644 --- a/pkg/devfile/adapters/kubernetes/component/adapter_test.go +++ b/pkg/devfile/adapters/kubernetes/component/adapter_test.go @@ -8,6 +8,7 @@ import ( adaptersCommon "github.com/openshift/odo/pkg/devfile/adapters/common" devfileParser "github.com/openshift/odo/pkg/devfile/parser" + "github.com/openshift/odo/pkg/devfile/parser/data/common" versionsCommon "github.com/openshift/odo/pkg/devfile/parser/data/common" "github.com/openshift/odo/pkg/kclient" "github.com/openshift/odo/pkg/testingutil" @@ -37,7 +38,7 @@ func 
TestCreateOrUpdateComponent(t *testing.T) { }, { name: "Case: Valid devfile", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, running: false, wantErr: false, }, @@ -49,16 +50,21 @@ func TestCreateOrUpdateComponent(t *testing.T) { }, { name: "Case: Valid devfile, already running component", - componentType: versionsCommon.DevfileComponentTypeDockerimage, + componentType: versionsCommon.ContainerComponentType, running: true, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + var comp versionsCommon.DevfileComponent + if tt.componentType != "" { + comp = testingutil.GetFakeComponent("component") + } devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: []versionsCommon.DevfileComponent{comp}, + ExecCommands: []versionsCommon.Exec{getExecCommand("run", versionsCommon.RunCommandGroupType)}, }, } @@ -218,14 +224,12 @@ func TestDoesComponentExist(t *testing.T) { }{ { name: "Case 1: Valid component name", - componentType: versionsCommon.DevfileComponentTypeDockerimage, componentName: "test-name", getComponentName: "test-name", want: true, }, { name: "Case 2: Non-existent component name", - componentType: versionsCommon.DevfileComponentTypeDockerimage, componentName: "test-name", getComponentName: "fake-component", want: false, @@ -235,7 +239,8 @@ func TestDoesComponentExist(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("component")}, + ExecCommands: []versionsCommon.Exec{getExecCommand("run", versionsCommon.RunCommandGroupType)}, }, } @@ -282,29 +287,26 @@ func TestWaitAndGetComponentPod(t *testing.T) { wantErr bool }{ { - name: "Case 1: Running", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - 
status: corev1.PodRunning, - wantErr: false, + name: "Case 1: Running", + status: corev1.PodRunning, + wantErr: false, }, { - name: "Case 2: Failed pod", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - status: corev1.PodFailed, - wantErr: true, + name: "Case 2: Failed pod", + status: corev1.PodFailed, + wantErr: true, }, { - name: "Case 3: Unknown pod", - componentType: versionsCommon.DevfileComponentTypeDockerimage, - status: corev1.PodUnknown, - wantErr: true, + name: "Case 3: Unknown pod", + status: corev1.PodUnknown, + wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, + Components: []versionsCommon.DevfileComponent{testingutil.GetFakeComponent("component")}, }, } @@ -382,7 +384,7 @@ func TestAdapterDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: "nodejs", + // ComponentType: "nodejs", }, } @@ -422,3 +424,19 @@ func TestAdapterDelete(t *testing.T) { }) } } + +func getExecCommand(id string, group common.DevfileCommandGroupType) versionsCommon.Exec { + + commands := [...]string{"ls -la", "pwd"} + component := "component" + workDir := [...]string{"/", "/root"} + + return versionsCommon.Exec{ + Id: id, + CommandLine: commands[0], + Component: component, + WorkingDir: workDir[0], + Group: &common.Group{Kind: group}, + } + +} diff --git a/pkg/devfile/adapters/kubernetes/storage/utils.go b/pkg/devfile/adapters/kubernetes/storage/utils.go index f15656fbcd3..ae77c963585 100644 --- a/pkg/devfile/adapters/kubernetes/storage/utils.go +++ b/pkg/devfile/adapters/kubernetes/storage/utils.go @@ -20,8 +20,8 @@ const pvcNameMaxLen = 45 func CreateComponentStorage(Client *kclient.Client, storages []common.Storage, componentName string) (err error) { for _, storage := range storages { - volumeName := *storage.Volume.Name - 
volumeSize := *storage.Volume.Size + volumeName := storage.Volume.Name + volumeSize := storage.Volume.Size pvcName := storage.Name existingPVCName, err := GetExistingPVC(Client, volumeName, componentName) @@ -30,7 +30,7 @@ func CreateComponentStorage(Client *kclient.Client, storages []common.Storage, c } if len(existingPVCName) == 0 { - klog.V(3).Infof("Creating a PVC for %v", volumeName) + klog.V(4).Infof("Creating a PVC for %v", volumeName) _, err := Create(Client, volumeName, volumeSize, componentName, pvcName) if err != nil { return errors.Wrapf(err, "Error creating PVC for "+volumeName) @@ -68,7 +68,7 @@ func Create(Client *kclient.Client, name, size, componentName, pvcName string) ( objectMeta.OwnerReferences = append(objectMeta.OwnerReferences, ownerReference) // Create PVC - klog.V(3).Infof("Creating a PVC with name %v and labels %v", pvcName, labels) + klog.V(4).Infof("Creating a PVC with name %v and labels %v", pvcName, labels) pvc, err := Client.CreatePVC(objectMeta, *pvcSpec) if err != nil { return nil, errors.Wrap(err, "unable to create PVC") @@ -95,14 +95,14 @@ func GetExistingPVC(Client *kclient.Client, volumeName, componentName string) (s label := "component=" + componentName + ",storage-name=" + volumeName - klog.V(3).Infof("Checking PVC for volume %v and label %v\n", volumeName, label) + klog.V(4).Infof("Checking PVC for volume %v and label %v\n", volumeName, label) PVCs, err := Client.GetPVCsFromSelector(label) if err != nil { return "", errors.Wrapf(err, "Unable to get PVC with selectors "+label) } if len(PVCs) == 1 { - klog.V(3).Infof("Found an existing PVC for volume %v and label %v\n", volumeName, label) + klog.V(4).Infof("Found an existing PVC for volume %v and label %v\n", volumeName, label) existingPVC := &PVCs[0] return existingPVC.Name, nil } else if len(PVCs) == 0 { diff --git a/pkg/devfile/adapters/kubernetes/storage/utils_test.go b/pkg/devfile/adapters/kubernetes/storage/utils_test.go index a60f2234c24..6c6dfac49f4 100644 --- 
a/pkg/devfile/adapters/kubernetes/storage/utils_test.go +++ b/pkg/devfile/adapters/kubernetes/storage/utils_test.go @@ -33,15 +33,15 @@ func TestCreateComponentStorage(t *testing.T) { { Name: "vol1-pvc", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, { Name: "vol2-pvc", Volume: common.DevfileVolume{ - Name: &volNames[1], - Size: &volSize, + Name: volNames[1], + Size: volSize, }, }, }, @@ -66,7 +66,7 @@ func TestCreateComponentStorage(t *testing.T) { }) // Create one of the test volumes - createdPVC, err := Create(fkclient, *tt.storages[0].Volume.Name, *tt.storages[0].Volume.Size, testComponentName, tt.storages[0].Name) + createdPVC, err := Create(fkclient, tt.storages[0].Volume.Name, tt.storages[0].Volume.Size, testComponentName, tt.storages[0].Name) if err != nil { t.Errorf("Error creating PVC %v: %v", tt.storages[0].Name, err) } @@ -78,9 +78,9 @@ func TestCreateComponentStorage(t *testing.T) { fkclientset.Kubernetes.PrependReactor("create", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) { labels := map[string]string{ "component": testComponentName, - "storage-name": *tt.storages[1].Volume.Name, + "storage-name": tt.storages[1].Volume.Name, } - PVC := testingutil.FakePVC(tt.storages[1].Name, *tt.storages[1].Volume.Size, labels) + PVC := testingutil.FakePVC(tt.storages[1].Name, tt.storages[1].Volume.Size, labels) return true, PVC, nil }) @@ -114,8 +114,8 @@ func TestStorageCreate(t *testing.T) { storage: common.Storage{ Name: "vol1-pvc", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, wantErr: false, @@ -126,8 +126,8 @@ func TestStorageCreate(t *testing.T) { storage: common.Storage{ Name: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, wantErr: true, 
@@ -138,8 +138,8 @@ func TestStorageCreate(t *testing.T) { storage: common.Storage{ Name: "", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &volSize, + Name: volNames[0], + Size: volSize, }, }, wantErr: true, @@ -150,8 +150,8 @@ func TestStorageCreate(t *testing.T) { storage: common.Storage{ Name: "vol1-pvc", Volume: common.DevfileVolume{ - Name: &volNames[0], - Size: &garbageVolSize, + Name: volNames[0], + Size: garbageVolSize, }, }, wantErr: true, @@ -179,17 +179,17 @@ func TestStorageCreate(t *testing.T) { fkclientset.Kubernetes.PrependReactor("create", "persistentvolumeclaims", func(action ktesting.Action) (bool, runtime.Object, error) { labels := map[string]string{ "component": testComponentName, - "storage-name": *tt.storage.Volume.Name, + "storage-name": tt.storage.Volume.Name, } if tt.wantErr { return true, nil, tt.err } - PVC := testingutil.FakePVC(tt.storage.Name, *tt.storage.Volume.Size, labels) + PVC := testingutil.FakePVC(tt.storage.Name, tt.storage.Volume.Size, labels) return true, PVC, nil }) // Create one of the test volumes - createdPVC, err := Create(fkclient, *tt.storage.Volume.Name, *tt.storage.Volume.Size, testComponentName, tt.storage.Name) + createdPVC, err := Create(fkclient, tt.storage.Volume.Name, tt.storage.Volume.Size, testComponentName, tt.storage.Name) if !tt.wantErr && err != nil { t.Errorf("Error creating PVC %v: %v", tt.storage.Name, err) } else if tt.wantErr && err != nil { diff --git a/pkg/devfile/adapters/kubernetes/utils/utils.go b/pkg/devfile/adapters/kubernetes/utils/utils.go index 8a39be97418..7a2c0222640 100644 --- a/pkg/devfile/adapters/kubernetes/utils/utils.go +++ b/pkg/devfile/adapters/kubernetes/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "fmt" + "strconv" "strings" adaptersCommon "github.com/openshift/odo/pkg/devfile/adapters/common" @@ -22,31 +23,31 @@ func ComponentExists(client kclient.Client, name string) bool { } // ConvertEnvs converts environment variables from the devfile structure to 
kubernetes structure -func ConvertEnvs(vars []common.DockerimageEnv) []corev1.EnvVar { +func ConvertEnvs(vars []common.Env) []corev1.EnvVar { kVars := []corev1.EnvVar{} for _, env := range vars { kVars = append(kVars, corev1.EnvVar{ - Name: *env.Name, - Value: *env.Value, + Name: env.Name, + Value: env.Value, }) } return kVars } // ConvertPorts converts endpoint variables from the devfile structure to kubernetes ContainerPort -func ConvertPorts(endpoints []common.DockerimageEndpoint) ([]corev1.ContainerPort, error) { +func ConvertPorts(endpoints []common.Endpoint) ([]corev1.ContainerPort, error) { containerPorts := []corev1.ContainerPort{} for _, endpoint := range endpoints { - name := strings.TrimSpace(util.GetDNS1123Name(strings.ToLower(*endpoint.Name))) + name := strings.TrimSpace(util.GetDNS1123Name(strings.ToLower(endpoint.Name))) name = util.TruncateString(name, 15) for _, c := range containerPorts { - if c.ContainerPort == *endpoint.Port { - return nil, fmt.Errorf("Devfile contains multiple identical ports: %v", *endpoint.Port) + if c.ContainerPort == endpoint.TargetPort { + return nil, fmt.Errorf("Devfile contains multiple identical ports: %v", endpoint.TargetPort) } } containerPorts = append(containerPorts, corev1.ContainerPort{ Name: name, - ContainerPort: *endpoint.Port, + ContainerPort: endpoint.TargetPort, }) } return containerPorts, nil @@ -56,13 +57,13 @@ func ConvertPorts(endpoints []common.DockerimageEndpoint) ([]corev1.ContainerPor func GetContainers(devfileObj devfileParser.DevfileObj) ([]corev1.Container, error) { var containers []corev1.Container for _, comp := range adaptersCommon.GetSupportedComponents(devfileObj.Data) { - envVars := ConvertEnvs(comp.Env) + envVars := ConvertEnvs(comp.Container.Env) resourceReqs := GetResourceReqs(comp) - ports, err := ConvertPorts(comp.Endpoints) + ports, err := ConvertPorts(comp.Container.Endpoints) if err != nil { return nil, err } - container := kclient.GenerateContainer(*comp.Alias, *comp.Image, false, 
comp.Command, comp.Args, envVars, resourceReqs, ports) + container := kclient.GenerateContainer(comp.Container.Name, comp.Container.Image, false, comp.Container.Command, comp.Container.Args, envVars, resourceReqs, ports) for _, c := range containers { for _, containerPort := range c.Ports { for _, curPort := range container.Ports { @@ -74,7 +75,7 @@ func GetContainers(devfileObj devfileParser.DevfileObj) ([]corev1.Container, err } // If `mountSources: true` was set, add an empty dir volume to the container to sync the source to - if comp.MountSources { + if comp.Container.MountSources { container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: kclient.OdoSourceVolume, MountPath: kclient.OdoSourceVolumeMount, @@ -109,54 +110,107 @@ func isEnvPresent(EnvVars []corev1.EnvVar, envVarName string) bool { // UpdateContainersWithSupervisord updates the run components entrypoint and volume mount // with supervisord if no entrypoint has been specified for the component in the devfile -func UpdateContainersWithSupervisord(devfileObj devfileParser.DevfileObj, containers []corev1.Container, devfileRunCmd string) ([]corev1.Container, error) { +func UpdateContainersWithSupervisord(devfileObj devfileParser.DevfileObj, containers []corev1.Container, devfileRunCmd string, devfileDebugCmd string, devfileDebugPort int) ([]corev1.Container, error) { runCommand, err := adaptersCommon.GetRunCommand(devfileObj.Data, devfileRunCmd) if err != nil { return nil, err } - for i, container := range containers { - for _, action := range runCommand.Actions { - // Check if the container belongs to a run command component - if container.Name == *action.Component { - // If the run component container has no entrypoint and arguments, override the entrypoint with supervisord - if len(container.Command) == 0 && len(container.Args) == 0 { - klog.V(3).Infof("Updating container %v entrypoint with supervisord", container.Name) - container.Command = append(container.Command, 
adaptersCommon.SupervisordBinaryPath) - container.Args = append(container.Args, "-c", adaptersCommon.SupervisordConfFile) + debugCommand, err := adaptersCommon.GetDebugCommand(devfileObj.Data, devfileDebugCmd) + if err != nil { + return nil, err + } + + for i := range containers { + container := &containers[i] + // Check if the container belongs to a run command component + if container.Name == runCommand.Exec.Component { + // If the run component container has no entrypoint and arguments, override the entrypoint with supervisord + if len(container.Command) == 0 && len(container.Args) == 0 { + overrideContainerArgs(container) + } + + // Always mount the supervisord volume in the run component container + klog.V(4).Infof("Updating container %v with supervisord volume mounts", container.Name) + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: adaptersCommon.SupervisordVolumeName, + MountPath: adaptersCommon.SupervisordMountPath, + }) + + // Update the run container's ENV for work dir and command + // only if the env var is not set in the devfile + // This is done, so supervisord can use it in it's program + if !isEnvPresent(container.Env, adaptersCommon.EnvOdoCommandRun) { + klog.V(4).Infof("Updating container %v env with run command", container.Name) + container.Env = append(container.Env, + corev1.EnvVar{ + Name: adaptersCommon.EnvOdoCommandRun, + Value: runCommand.Exec.CommandLine, + }) + } + + if !isEnvPresent(container.Env, adaptersCommon.EnvOdoCommandRunWorkingDir) && runCommand.Exec.WorkingDir != "" { + klog.V(4).Infof("Updating container %v env with run command's workdir", container.Name) + container.Env = append(container.Env, + corev1.EnvVar{ + Name: adaptersCommon.EnvOdoCommandRunWorkingDir, + Value: runCommand.Exec.WorkingDir, + }) + } + } + + // Check if the container belongs to a debug command component + if debugCommand.Exec != nil && container.Name == debugCommand.Exec.Component { + // If the debug component 
container has no entrypoint and arguments, override the entrypoint with supervisord + if len(container.Command) == 0 && len(container.Args) == 0 { + overrideContainerArgs(container) + } + + foundMountPath := false + for _, mounts := range container.VolumeMounts { + if mounts.Name == adaptersCommon.SupervisordVolumeName && mounts.MountPath == adaptersCommon.SupervisordMountPath { + foundMountPath = true } + } - // Always mount the supervisord volume in the run component container - klog.V(3).Infof("Updating container %v with supervisord volume mounts", container.Name) + if !foundMountPath { + // Always mount the supervisord volume in the debug component container + klog.V(4).Infof("Updating container %v with supervisord volume mounts", container.Name) container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: adaptersCommon.SupervisordVolumeName, MountPath: adaptersCommon.SupervisordMountPath, }) + } - // Update the run container's ENV for work dir and command - // only if the env var is not set in the devfile - // This is done, so supervisord can use it in it's program - if !isEnvPresent(container.Env, adaptersCommon.EnvOdoCommandRun) { - klog.V(3).Infof("Updating container %v env with run command", container.Name) - container.Env = append(container.Env, - corev1.EnvVar{ - Name: adaptersCommon.EnvOdoCommandRun, - Value: *action.Command, - }) - } + // Update the debug container's ENV for work dir and command + // only if the env var is not set in the devfile + // This is done, so supervisord can use it in it's program + if !isEnvPresent(container.Env, adaptersCommon.EnvOdoCommandDebug) { + klog.V(4).Infof("Updating container %v env with debug command", container.Name) + container.Env = append(container.Env, + corev1.EnvVar{ + Name: adaptersCommon.EnvOdoCommandDebug, + Value: debugCommand.Exec.CommandLine, + }) + } - if !isEnvPresent(container.Env, adaptersCommon.EnvOdoCommandRunWorkingDir) && action.Workdir != nil { - klog.V(3).Infof("Updating 
container %v env with run command's workdir", container.Name) - container.Env = append(container.Env, - corev1.EnvVar{ - Name: adaptersCommon.EnvOdoCommandRunWorkingDir, - Value: *action.Workdir, - }) - } + if debugCommand.Exec.WorkingDir != "" && !isEnvPresent(container.Env, adaptersCommon.EnvOdoCommandDebugWorkingDir) { + klog.V(4).Infof("Updating container %v env with debug command's workdir", container.Name) + container.Env = append(container.Env, + corev1.EnvVar{ + Name: adaptersCommon.EnvOdoCommandDebugWorkingDir, + Value: debugCommand.Exec.WorkingDir, + }) + } - // Update the containers array since the array is not a pointer to the container - containers[i] = container + if !isEnvPresent(container.Env, adaptersCommon.EnvDebugPort) { + klog.V(4).Infof("Updating container %v env with debug command's debugPort", container.Name) + container.Env = append(container.Env, + corev1.EnvVar{ + Name: adaptersCommon.EnvDebugPort, + Value: strconv.Itoa(devfileDebugPort), + }) } } } @@ -169,8 +223,8 @@ func UpdateContainersWithSupervisord(devfileObj devfileParser.DevfileObj, contai func GetResourceReqs(comp common.DevfileComponent) corev1.ResourceRequirements { reqs := corev1.ResourceRequirements{} limits := make(corev1.ResourceList) - if comp.MemoryLimit != nil { - memoryLimit, err := resource.ParseQuantity(*comp.MemoryLimit) + if &comp.Container.MemoryLimit != nil { + memoryLimit, err := resource.ParseQuantity(comp.Container.MemoryLimit) if err == nil { limits[corev1.ResourceMemory] = memoryLimit } @@ -178,3 +232,10 @@ func GetResourceReqs(comp common.DevfileComponent) corev1.ResourceRequirements { } return reqs } + +// overrideContainerArgs overrides the container's entrypoint with supervisord +func overrideContainerArgs(container *corev1.Container) { + klog.V(4).Infof("Updating container %v entrypoint with supervisord", container.Name) + container.Command = append(container.Command, adaptersCommon.SupervisordBinaryPath) + container.Args = append(container.Args, "-c", 
adaptersCommon.SupervisordConfFile) +} diff --git a/pkg/devfile/adapters/kubernetes/utils/utils_test.go b/pkg/devfile/adapters/kubernetes/utils/utils_test.go index 8f12c3c8a78..0b5baa485c7 100644 --- a/pkg/devfile/adapters/kubernetes/utils/utils_test.go +++ b/pkg/devfile/adapters/kubernetes/utils/utils_test.go @@ -2,6 +2,7 @@ package utils import ( "reflect" + "strconv" "testing" adaptersCommon "github.com/openshift/odo/pkg/devfile/adapters/common" @@ -17,11 +18,22 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { command := "ls -la" component := "alias1" + + debugCommand := "nodemon --inspect={DEBUG_PORT}" + debugComponent := "alias2" + image := "image1" workDir := "/root" - validCommandType := common.DevfileCommandTypeExec emptyString := "" defaultCommand := []string{"tail"} + execRunGroup := versionsCommon.Group{ + IsDefault: true, + Kind: versionsCommon.RunCommandGroupType, + } + execDebugGroup := versionsCommon.Group{ + IsDefault: true, + Kind: versionsCommon.DebugCommandGroupType, + } defaultArgs := []string{"-f", "/dev/null"} supervisordCommand := []string{adaptersCommon.SupervisordBinaryPath} supervisordArgs := []string{"-c", adaptersCommon.SupervisordConfFile} @@ -29,8 +41,10 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { tests := []struct { name string runCommand string + debugCommand string + debugPort int containers []corev1.Container - commandActions []common.DevfileCommandAction + execCommands []common.Exec componentType common.DevfileComponentType isSupervisordEntrypoint bool wantErr bool @@ -48,15 +62,15 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { Env: []corev1.EnvVar{}, }, }, - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []versionsCommon.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, }, }, - componentType: 
common.DevfileComponentTypeDockerimage, + componentType: common.ContainerComponentType, isSupervisordEntrypoint: false, wantErr: false, }, @@ -73,14 +87,14 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { Env: []corev1.EnvVar{}, }, }, - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []versionsCommon.Exec{ { - Command: &command, - Component: &component, - Type: &validCommandType, + CommandLine: command, + Component: component, + Group: &execRunGroup, }, }, - componentType: common.DevfileComponentTypeDockerimage, + componentType: common.ContainerComponentType, isSupervisordEntrypoint: false, wantErr: false, }, @@ -95,15 +109,15 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { Env: []corev1.EnvVar{}, }, }, - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []versionsCommon.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, }, }, - componentType: common.DevfileComponentTypeDockerimage, + componentType: common.ContainerComponentType, isSupervisordEntrypoint: true, wantErr: false, }, @@ -118,15 +132,16 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { Env: []corev1.EnvVar{}, }, }, - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []versionsCommon.Exec{ { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + Id: "customcommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, }, }, - componentType: common.DevfileComponentTypeDockerimage, + componentType: common.ContainerComponentType, isSupervisordEntrypoint: true, wantErr: false, }, @@ -141,15 +156,126 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { Env: []corev1.EnvVar{}, }, }, - commandActions: []versionsCommon.DevfileCommandAction{ + execCommands: []versionsCommon.Exec{ + { + 
CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, + }, + }, + componentType: common.ContainerComponentType, + isSupervisordEntrypoint: true, + wantErr: true, + }, + + { + name: "Case: empty debug command", + runCommand: "customRunCommand", + debugCommand: emptyString, + debugPort: 5858, + containers: []corev1.Container{ + { + Name: component, + Image: image, + ImagePullPolicy: corev1.PullAlways, + Env: []corev1.EnvVar{}, + }, + { + Name: debugComponent, + Image: image, + ImagePullPolicy: corev1.PullAlways, + Env: []corev1.EnvVar{}, + }, + }, + execCommands: []versionsCommon.Exec{ + { + Id: "customRunCommand", + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, + }, + { + CommandLine: debugCommand, + Component: debugComponent, + WorkingDir: workDir, + Group: &execDebugGroup, + }, + }, + componentType: common.ContainerComponentType, + isSupervisordEntrypoint: true, + wantErr: false, + }, + { + name: "Case: custom debug command", + runCommand: emptyString, + debugCommand: "customdebugcommand", + debugPort: 3000, + containers: []corev1.Container{ + { + Name: component, + Image: image, + ImagePullPolicy: corev1.PullAlways, + Env: []corev1.EnvVar{}, + }, + }, + execCommands: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, + }, + { + Id: "customdebugcommand", + CommandLine: debugCommand, + Component: component, + WorkingDir: workDir, + Group: &execDebugGroup, + }, + }, + componentType: common.ContainerComponentType, + isSupervisordEntrypoint: true, + wantErr: false, + }, + { + name: "Case: wrong custom debug command", + runCommand: emptyString, + debugCommand: "customdebugcommand123", + debugPort: 9090, + containers: []corev1.Container{ + { + Name: component, + Image: image, + ImagePullPolicy: corev1.PullAlways, + Env: []corev1.EnvVar{}, + }, + { + Name: debugComponent, + Image: image, + ImagePullPolicy: 
corev1.PullAlways, + Env: []corev1.EnvVar{}, + }, + }, + execCommands: []versionsCommon.Exec{ + { + CommandLine: command, + Component: component, + WorkingDir: workDir, + Group: &execRunGroup, + }, { - Command: &command, - Component: &component, - Workdir: &workDir, - Type: &validCommandType, + CommandLine: debugCommand, + Component: debugComponent, + WorkingDir: workDir, + Group: &versionsCommon.Group{ + IsDefault: true, + Kind: versionsCommon.BuildCommandGroupType, + }, }, }, - componentType: common.DevfileComponentTypeDockerimage, + componentType: common.ContainerComponentType, isSupervisordEntrypoint: true, wantErr: true, }, @@ -158,62 +284,107 @@ func TestUpdateContainersWithSupervisord(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := devfileParser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: tt.componentType, - CommandActions: tt.commandActions, + Components: []versionsCommon.DevfileComponent{ + { + Container: &versionsCommon.Container{ + Name: component, + }, + }, + { + Container: &versionsCommon.Container{ + Name: debugComponent, + }, + }, + }, + ExecCommands: tt.execCommands, }, } - containers, err := UpdateContainersWithSupervisord(devObj, tt.containers, tt.runCommand) + containers, err := UpdateContainersWithSupervisord(devObj, tt.containers, tt.runCommand, tt.debugCommand, tt.debugPort) - if !tt.wantErr && err != nil { - t.Errorf("TestUpdateContainersWithSupervisord unxpected error: %v", err) - } else if tt.wantErr && err != nil { - // return since we dont want to test anything further - return + if tt.wantErr { + if err == nil { + t.Error("wanted error but got no error") + } else { + // return since we dont want to test anything further + return + } + } else { + if err != nil { + t.Errorf("TestUpdateContainersWithSupervisord: unexpected error %v", err) + } } // Check if the supervisord volume has been mounted supervisordVolumeMountMatched := false envRunMatched := false envWorkDirMatched := false + envDebugMatched := 
false + envDebugWorkDirMatched := false + envDebugPortMatched := false - if tt.commandActions[0].Workdir == nil { + if tt.execCommands[0].WorkingDir == "" { // if workdir is not present, dont test for matching the env envWorkDirMatched = true } + if len(tt.execCommands) >= 2 && tt.execCommands[1].WorkingDir == "" { + // if workdir is not present, dont test for matching the env + envDebugWorkDirMatched = true + } + for _, container := range containers { - if container.Name == component { - for _, volumeMount := range container.VolumeMounts { - if volumeMount.Name == adaptersCommon.SupervisordVolumeName && volumeMount.MountPath == adaptersCommon.SupervisordMountPath { - supervisordVolumeMountMatched = true + for _, testContainer := range tt.containers { + if container.Name == testContainer.Name { + for _, volumeMount := range container.VolumeMounts { + if volumeMount.Name == adaptersCommon.SupervisordVolumeName && volumeMount.MountPath == adaptersCommon.SupervisordMountPath { + supervisordVolumeMountMatched = true + } } - } - for _, envVar := range container.Env { - if envVar.Name == adaptersCommon.EnvOdoCommandRun && envVar.Value == *tt.commandActions[0].Command { - envRunMatched = true - } - if tt.commandActions[0].Workdir != nil && envVar.Name == adaptersCommon.EnvOdoCommandRunWorkingDir && envVar.Value == *tt.commandActions[0].Workdir { - envWorkDirMatched = true - } - } + for _, envVar := range container.Env { + if envVar.Name == adaptersCommon.EnvOdoCommandRun && envVar.Value == tt.execCommands[0].CommandLine { + envRunMatched = true + } + if tt.execCommands[0].WorkingDir != "" && envVar.Name == adaptersCommon.EnvOdoCommandRunWorkingDir && envVar.Value == tt.execCommands[0].WorkingDir { + envWorkDirMatched = true + } - if tt.isSupervisordEntrypoint && (!reflect.DeepEqual(container.Command, supervisordCommand) || !reflect.DeepEqual(container.Args, supervisordArgs)) { - t.Errorf("TestUpdateContainersWithSupervisord error: commands and args mismatched for 
container %v, expected command: %v actual command: %v, expected args: %v actual args: %v", component, supervisordCommand, container.Command, supervisordArgs, container.Args) - } else if !tt.isSupervisordEntrypoint && (!reflect.DeepEqual(container.Command, defaultCommand) || !reflect.DeepEqual(container.Args, defaultArgs)) { - t.Errorf("TestUpdateContainersWithSupervisord error: commands and args mismatched for container %v, expected command: %v actual command: %v, expected args: %v actual args: %v", component, defaultCommand, container.Command, defaultArgs, container.Args) + // if the debug command is also present + if len(tt.execCommands) >= 2 { + // check if the debug command env was set properly + if envVar.Name == adaptersCommon.EnvOdoCommandDebug && envVar.Value == tt.execCommands[1].CommandLine { + envDebugMatched = true + } + // check if the debug command's workingDir env was set properly + if tt.execCommands[1].WorkingDir != "" && envVar.Name == adaptersCommon.EnvOdoCommandDebugWorkingDir && envVar.Value == tt.execCommands[1].WorkingDir { + envDebugWorkDirMatched = true + } + // check if the debug command's debugPort env was set properly + if envVar.Name == adaptersCommon.EnvDebugPort && envVar.Value == strconv.Itoa(tt.debugPort) { + envDebugPortMatched = true + } + } + } + if tt.isSupervisordEntrypoint && (!reflect.DeepEqual(container.Command, supervisordCommand) || !reflect.DeepEqual(container.Args, supervisordArgs)) { + t.Errorf("TestUpdateContainersWithSupervisord error: commands and args mismatched for container %v, expected command: %v actual command: %v, expected args: %v actual args: %v", component, supervisordCommand, container.Command, supervisordArgs, container.Args) + } else if !tt.isSupervisordEntrypoint && (!reflect.DeepEqual(container.Command, defaultCommand) || !reflect.DeepEqual(container.Args, defaultArgs)) { + t.Errorf("TestUpdateContainersWithSupervisord error: commands and args mismatched for container %v, expected command: %v actual 
command: %v, expected args: %v actual args: %v", component, defaultCommand, container.Command, defaultArgs, container.Args) + } } } } - if !supervisordVolumeMountMatched { t.Errorf("TestUpdateContainersWithSupervisord error: could not find supervisord volume mounts for container %v", component) } if !envRunMatched || !envWorkDirMatched { t.Errorf("TestUpdateContainersWithSupervisord error: could not find env vars for supervisord in container %v, found command env: %v, found work dir env: %v", component, envRunMatched, envWorkDirMatched) } + + if len(tt.execCommands) >= 2 && (!envDebugMatched || !envDebugWorkDirMatched || !envDebugPortMatched) { + t.Errorf("TestUpdateContainersWithSupervisord error: could not find env vars for supervisord in container %v, found debug env: %v, found work dir env: %v, found debug port env: %v", component, envDebugMatched, envDebugWorkDirMatched, envDebugPortMatched) + } }) } diff --git a/pkg/devfile/parser/context/apiVersion.go b/pkg/devfile/parser/context/apiVersion.go index 68f4e1fd12b..8f37641482b 100644 --- a/pkg/devfile/parser/context/apiVersion.go +++ b/pkg/devfile/parser/context/apiVersion.go @@ -19,19 +19,34 @@ func (d *DevfileCtx) SetDevfileAPIVersion() error { return errors.Wrapf(err, "failed to decode devfile json") } - // Get "apiVersion" value from the map - apiVersion, ok := r["apiVersion"] - if !ok { - return fmt.Errorf("apiVersion not present in devfile") - } + var apiVer string + + // Get "apiVersion" value from map for devfile V1 + apiVersion, okApi := r["apiVersion"] + + // Get "schemaVersion" value from map for devfile V2 + schemaVersion, okSchema := r["schemaVersion"] + + if okApi { + apiVer = apiVersion.(string) + // apiVersion cannot be empty + if apiVer == "" { + return fmt.Errorf("apiVersion in devfile cannot be empty") + } + + } else if okSchema { + apiVer = schemaVersion.(string) + // SchemaVersion cannot be empty + if schemaVersion.(string) == "" { + return fmt.Errorf("schemaVersion in devfile cannot be 
empty") + } + } else { + return fmt.Errorf("apiVersion or schemaVersion not present in devfile") - // apiVersion cannot be empty - if apiVersion.(string) == "" { - return fmt.Errorf("apiVersion in devfile cannot be empty") } // Successful - d.apiVersion = apiVersion.(string) + d.apiVersion = apiVer klog.V(4).Infof("devfile apiVersion: '%s'", d.apiVersion) return nil } diff --git a/pkg/devfile/parser/context/apiVersion_test.go b/pkg/devfile/parser/context/apiVersion_test.go index ad708cdcc4a..904809637e7 100644 --- a/pkg/devfile/parser/context/apiVersion_test.go +++ b/pkg/devfile/parser/context/apiVersion_test.go @@ -32,7 +32,7 @@ func TestSetDevfileAPIVersion(t *testing.T) { name: "apiVersion not present", rawJson: []byte(emptyJson), want: "", - wantErr: fmt.Errorf("apiVersion not present in devfile"), + wantErr: fmt.Errorf("apiVersion or schemaVersion not present in devfile"), }, { name: "apiVersion empty", diff --git a/pkg/devfile/parser/data/1.0.0/components.go b/pkg/devfile/parser/data/1.0.0/components.go index 2fd71de403d..a3d6f27a368 100644 --- a/pkg/devfile/parser/data/1.0.0/components.go +++ b/pkg/devfile/parser/data/1.0.0/components.go @@ -6,17 +6,38 @@ import ( "github.com/openshift/odo/pkg/devfile/parser/data/common" ) -// GetComponents returns the slice of DevfileComponent objects parsed from the Devfile +func (d *Devfile100) GetMetadata() common.DevfileMetadata { + // No GenerateName field in V2 + return common.DevfileMetadata{ + Name: d.Metadata.Name, + //Version: No field in V1 + } +} + +/// GetComponents returns the slice of DevfileComponent objects parsed from the Devfile func (d *Devfile100) GetComponents() []common.DevfileComponent { - return d.Components + var comps []common.DevfileComponent + for _, v := range d.Components { + comps = append(comps, convertV1ComponentToCommon(v)) + } + return comps } // GetAliasedComponents returns the slice of DevfileComponent objects that each have an alias func (d *Devfile100) GetAliasedComponents() 
[]common.DevfileComponent { + // TODO(adi): All components are aliased for V2, this method should be removed from interface + // when we remove V1 + var comps []common.DevfileComponent + for _, v := range d.Components { + comps = append(comps, convertV1ComponentToCommon(v)) + } + var aliasedComponents = []common.DevfileComponent{} - for _, comp := range d.Components { - if comp.Alias != nil { - aliasedComponents = append(aliasedComponents, comp) + for _, comp := range comps { + if comp.Container != nil { + if comp.Container.Name != "" { + aliasedComponents = append(aliasedComponents, comp) + } } } return aliasedComponents @@ -24,17 +45,188 @@ func (d *Devfile100) GetAliasedComponents() []common.DevfileComponent { // GetProjects returns the slice of DevfileProject objects parsed from the Devfile func (d *Devfile100) GetProjects() []common.DevfileProject { - return d.Projects + + var projects []common.DevfileProject + for _, v := range d.Projects { + projects = append(projects, convertV1ProjectToCommon(v)) + + } + + return projects } // GetCommands returns the slice of DevfileCommand objects parsed from the Devfile func (d *Devfile100) GetCommands() []common.DevfileCommand { + var commands []common.DevfileCommand + for _, v := range d.Commands { + cmd := convertV1CommandToCommon(v) - for _, command := range d.Commands { - command.Name = strings.ToLower(command.Name) - commands = append(commands, command) + commands = append(commands, cmd) } return commands } + +func (d *Devfile100) GetParent() common.DevfileParent { + return common.DevfileParent{} + +} + +func (d *Devfile100) GetEvents() common.DevfileEvents { + return common.DevfileEvents{} + +} + +func convertV1CommandToCommon(c Command) (d common.DevfileCommand) { + var exec common.Exec + + name := strings.ToLower(c.Name) + + for _, action := range c.Actions { + + if action.Type == DevfileCommandTypeExec { + exec = common.Exec{ + Attributes: c.Attributes, + CommandLine: action.Command, + Component: 
action.Component, + Group: getGroup(name), + Id: name, + WorkingDir: action.Workdir, + // Env: + // Label: + } + } + + } + + // TODO: Previewurl + return common.DevfileCommand{ + //TODO(adi): Type + Exec: &exec, + } +} + +func convertV1ComponentToCommon(c Component) (component common.DevfileComponent) { + + var endpoints []common.Endpoint + for _, v := range c.ComponentDockerimage.Endpoints { + endpoints = append(endpoints, convertV1EndpointsToCommon(v)) + } + + var envs []common.Env + for _, v := range c.ComponentDockerimage.Env { + envs = append(envs, convertV1EnvToCommon(v)) + } + + var volumes []common.VolumeMount + for _, v := range c.ComponentDockerimage.Volumes { + volumes = append(volumes, convertV1VolumeToCommon(v)) + } + + container := common.Container{ + Name: c.Alias, + Endpoints: endpoints, + Env: envs, + Image: c.ComponentDockerimage.Image, + MemoryLimit: c.ComponentDockerimage.MemoryLimit, + MountSources: c.MountSources, + VolumeMounts: volumes, + Command: c.Command, + Args: c.Args, + } + + component = common.DevfileComponent{Container: &container} + + return component +} + +func convertV1EndpointsToCommon(e DockerimageEndpoint) common.Endpoint { + return common.Endpoint{ + // Attributes: + // Configuration: + Name: e.Name, + TargetPort: e.Port, + } +} + +func convertV1EnvToCommon(e DockerimageEnv) common.Env { + return common.Env{ + Name: e.Name, + Value: e.Value, + } +} + +func convertV1VolumeToCommon(v DockerimageVolume) common.VolumeMount { + return common.VolumeMount{ + Name: v.Name, + Path: v.ContainerPath, + } +} + +func convertV1ProjectToCommon(p Project) common.DevfileProject { + var project = common.DevfileProject{ + ClonePath: p.ClonePath, + Name: p.Name, + } + + switch p.Source.Type { + case ProjectTypeGit: + git := common.Git{ + Branch: p.Source.Branch, + Location: p.Source.Location, + SparseCheckoutDir: p.Source.SparseCheckoutDir, + StartPoint: p.Source.StartPoint, + } + + project.Git = &git + + case ProjectTypeGitHub: + github := 
common.Github{ + Branch: p.Source.Branch, + Location: p.Source.Location, + SparseCheckoutDir: p.Source.SparseCheckoutDir, + StartPoint: p.Source.StartPoint, + } + project.Github = &github + + case ProjectTypeZip: + zip := common.Zip{ + Location: p.Source.Location, + SparseCheckoutDir: p.Source.SparseCheckoutDir, + } + project.Zip = &zip + + } + + return project + +} + +func getGroup(name string) *common.Group { + + switch name { + case "devrun": + return &common.Group{ + Kind: common.RunCommandGroupType, + IsDefault: true, + } + case "devbuild": + return &common.Group{ + Kind: common.BuildCommandGroupType, + IsDefault: true, + } + case "devinit": + return &common.Group{ + Kind: common.InitCommandGroupType, + IsDefault: true, + } + case "debugrun": + return &common.Group{ + Kind: common.DebugCommandGroupType, + IsDefault: true, + } + } + + return nil +} diff --git a/pkg/devfile/parser/data/1.0.0/types.go b/pkg/devfile/parser/data/1.0.0/types.go index 75ee4461f4b..d18358ebbe3 100644 --- a/pkg/devfile/parser/data/1.0.0/types.go +++ b/pkg/devfile/parser/data/1.0.0/types.go @@ -1,26 +1,233 @@ package version100 -import ( - "github.com/openshift/odo/pkg/devfile/parser/data/common" -) - // Devfile100 struct maps to devfile 1.0.0 version schema type Devfile100 struct { // Devfile section "apiVersion" - ApiVersion common.ApiVersion `yaml:"apiVersion" json:"apiVersion"` + ApiVersion ApiVersion `yaml:"apiVersion" json:"apiVersion"` // Devfile section "metadata" - Metadata common.DevfileMetadata `yaml:"metadata" json:"metadata"` + Metadata Metadata `yaml:"metadata" json:"metadata"` // Devfile section projects - Projects []common.DevfileProject `yaml:"projects,omitempty" json:"projects,omitempty"` + Projects []Project `yaml:"projects,omitempty" json:"projects,omitempty"` - Attributes common.Attributes `yaml:"attributes,omitempty" json:"attributes,omitempty"` + Attributes Attributes `yaml:"attributes,omitempty" json:"attributes,omitempty"` // Description of the workspace 
components, such as editor and plugins - Components []common.DevfileComponent `yaml:"components,omitempty" json:"components,omitempty"` + Components []Component `yaml:"components,omitempty" json:"components,omitempty"` // Description of the predefined commands to be available in workspace - Commands []common.DevfileCommand `yaml:"commands,omitempty" json:"commands,omitempty"` + Commands []Command `yaml:"commands,omitempty" json:"commands,omitempty"` +} + +// -------------- Supported devfile project types ------------ // +// DevfileProjectType store valid devfile project types +type ProjectType string + +const ( + ProjectTypeGit ProjectType = "git" + ProjectTypeGitHub ProjectType = "github" + ProjectTypeZip ProjectType = "zip" +) + +var SupportedProjectTypes = []ProjectType{ProjectTypeGit} + +// -------------- Supported devfile component types ------------ // +// DevfileComponentType stores valid devfile component types +type ComponentType string + +const ( + DevfileComponentTypeCheEditor ComponentType = "cheEditor" + DevfileComponentTypeChePlugin ComponentType = "chePlugin" + DevfileComponentTypeDockerimage ComponentType = "dockerimage" + DevfileComponentTypeKubernetes ComponentType = "kubernetes" + DevfileComponentTypeOpenshift ComponentType = "openshift" +) + +// -------------- Supported devfile command types ------------ // +type CommandType string + +const ( + DevfileCommandTypeInit CommandType = "init" + DevfileCommandTypeBuild CommandType = "build" + DevfileCommandTypeRun CommandType = "run" + DevfileCommandTypeDebug CommandType = "debug" + DevfileCommandTypeExec CommandType = "exec" +) + +// ----------- Devfile Schema ---------- // +type Attributes map[string]string + +type ApiVersion string + +type Metadata struct { + + // Workspaces created from devfile, will use it as base and append random suffix. + // It's used when name is not defined. + GenerateName string `yaml:"generateName,omitempty" json:"generateName,omitempty"` + + // The name of the devfile. 
Workspaces created from devfile, will inherit this + // name + Name string `yaml:"name,omitempty" json:"name,omitempty"` +} + +// Description of the projects, containing names and sources locations +type Project struct { + + // The path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name." + ClonePath string `yaml:"clonePath,omitempty" json:"clonePath,omitempty"` + + // The Project Name + Name string `yaml:"name" json:"name"` + + // Describes the project's source - type and location + Source ProjectSource `yaml:"source" json:"source"` +} + +type ProjectSource struct { + Type ProjectType `yaml:"type" json:"type"` + + // Project's source location address. Should be URL for git and github located projects" + Location string `yaml:"location" json:"location"` + + // The name of the of the branch to check out after obtaining the source from the location. + // The branch has to already exist in the source otherwise the default branch is used. + // In case of git, this is also the name of the remote branch to push to. + Branch string `yaml:"branch,omitempty" json:"branch,omitempty"` + + // The id of the commit to reset the checked out branch to. + // Note that this is equivalent to 'startPoint' and provided for convenience. + CommitId string `yaml:"commitId,omitempty" json:"commitId,omitempty"` + + // Part of project to populate in the working directory. + SparseCheckoutDir string `yaml:"sparseCheckoutDir,omitempty" json:"sparseCheckoutDir,omitempty"` + + // The tag or commit id to reset the checked out branch to. + StartPoint string `yaml:"startPoint,omitempty" json:"startPoint,omitempty"` + + // The name of the tag to reset the checked out branch to. + // Note that this is equivalent to 'startPoint' and provided for convenience. 
+ Tag string `yaml:"tag,omitempty" json:"tag,omitempty"` +} + +type Command struct { + + // List of the actions of given command. Now the only one command must be + // specified in list but there are plans to implement supporting multiple actions + // commands. + Actions []CommandAction `yaml:"actions" json:"actions"` + + // Additional command attributes + Attributes Attributes `yaml:"attributes,omitempty" json:"attributes,omitempty"` + + // Describes the name of the command. Should be unique per commands set. + Name string `yaml:"name"` + + // Preview url + PreviewUrl CommandPreviewUrl `yaml:"previewUrl,omitempty" json:"previewUrl,omitempty"` +} + +type CommandPreviewUrl struct { + Port int32 `yaml:"port,omitempty" json:"port,omitempty"` + Path string `yaml:"path,omitempty" json:"path,omitempty"` +} + +type CommandAction struct { + + // The actual action command-line string + Command string `yaml:"command,omitempty" json:"command,omitempty"` + + // Describes component to which given action relates + Component string `yaml:"component,omitempty" json:"component,omitempty"` + + // the path relative to the location of the devfile to the configuration file + // defining one or more actions in the editor-specific format + Reference string `yaml:"reference,omitempty" json:"reference,omitempty"` + + // The content of the referenced configuration file that defines one or more + // actions in the editor-specific format + ReferenceContent string `yaml:"referenceContent,omitempty" json:"referenceContent,omitempty"` + + // Describes action type + Type CommandType `yaml:"type,omitempty" json:"type,omitempty"` + + // Working directory where the command should be executed + Workdir string `yaml:"workdir,omitempty" json:"workdir,omitempty"` +} + +type Component struct { + + // The name using which other places of this devfile (like commands) can refer to + // this component. This attribute is optional but must be unique in the devfile if + // specified. 
+ Alias string `yaml:"alias,omitempty" json:"alias,omitempty"` + + // Describes whether projects sources should be mount to the component. + // `CHE_PROJECTS_ROOT` environment variable should contains a path where projects + // sources are mount + MountSources bool `yaml:"mountSources,omitempty" json:"mountSources,omitempty"` + + // Describes type of the component, e.g. whether it is an plugin or editor or + // other type + Type ComponentType `yaml:"type" json:"type"` + + // for type ChePlugin + ComponentChePlugin `yaml:",inline" json:",inline"` + + // for type=dockerfile + ComponentDockerimage `yaml:",inline" json:",inline"` +} + +type ComponentChePlugin struct { + Id string `yaml:"id,omitempty" json:"id,omitempty"` + Reference string `yaml:"reference,omitempty" json:"reference,omitempty"` + RegistryUrl string `yaml:"registryUrl,omitempty" json:"registryUrl,omitempty"` +} + +type ComponentCheEditor struct { + Id string `yaml:"id,omitempty" json:"id,omitempty"` + Reference string `yaml:"reference,omitempty" json:"reference,omitempty"` + RegistryUrl string `yaml:"registryUrl,omitempty" json:"registryUrl,omitempty"` +} + +type ComponentOpenshift struct { + Reference string `yaml:"reference,omitempty" json:"reference,omitempty"` + ReferenceContent string `yaml:"referenceContent,omitempty" json:"referenceContent,omitempty"` + Selector string `yaml:"selector,omitempty" json:"selector,omitempty"` + EntryPoints string `yaml:"entryPoints,omitempty" json:"entryPoints,omitempty"` + MemoryLimit string `yaml:"memoryLimit,omitempty" json:"memoryLimit,omitempty"` +} + +type ComponentKubernetes struct { + Reference string `yaml:"reference,omitempty" json:"reference,omitempty"` + ReferenceContent string `yaml:"referenceContent,omitempty" json:"referenceContent,omitempty"` + Selector string `yaml:"selector,omitempty" json:"selector,omitempty"` + EntryPoints string `yaml:"entryPoints,omitempty" json:"entryPoints,omitempty"` + MemoryLimit string `yaml:"memoryLimit,omitempty" 
json:"memoryLimit,omitempty"` +} + +type ComponentDockerimage struct { + Image string `yaml:"image,omitempty" json:"image,omitempty"` + MemoryLimit string `yaml:"memoryLimit,omitempty" json:"memoryLimit,omitempty"` + Command []string `yaml:"command,omitempty" json:"command,omitempty"` + Args []string `yaml:"args,omitempty" json:"args,omitempty"` + Volumes []DockerimageVolume `yaml:"volumes,omitempty" json:"volumes,omitempty"` + Env []DockerimageEnv `yaml:"env,omitempty" json:"env,omitempty"` + Endpoints []DockerimageEndpoint `yaml:"endpoints,omitempty" json:"endpoints,omitempty"` +} + +type DockerimageVolume struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + ContainerPath string `yaml:"containerPath,omitempty" json:"containerPath,omitempty"` +} + +type DockerimageEnv struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Value string `yaml:"value,omitempty" json:"value,omitempty"` +} + +type DockerimageEndpoint struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Port int32 `yaml:"port,omitempty" json:"port,omitempty"` } diff --git a/pkg/devfile/parser/data/2.0.0/components.go b/pkg/devfile/parser/data/2.0.0/components.go new file mode 100644 index 00000000000..127499cd485 --- /dev/null +++ b/pkg/devfile/parser/data/2.0.0/components.go @@ -0,0 +1,41 @@ +package version200 + +import ( + "github.com/openshift/odo/pkg/devfile/parser/data/common" +) + +// GetComponents returns the slice of DevfileComponent objects parsed from the Devfile +func (d *Devfile200) GetComponents() []common.DevfileComponent { + return d.Components +} + +// GetCommands returns the slice of DevfileCommand objects parsed from the Devfile +func (d *Devfile200) GetCommands() []common.DevfileCommand { + return d.Commands +} + +// GetParent returns the DevfileParent object parsed from devfile +func (d *Devfile200) GetParent() common.DevfileParent { + return d.Parent +} + +// GetProjects returns the DevfileProject Object parsed from devfile +func 
(d *Devfile200) GetProjects() []common.DevfileProject { + return d.Projects +} + +// GetMetadata returns the DevfileMetadata Object parsed from devfile +func (d *Devfile200) GetMetadata() common.DevfileMetadata { + return d.Metadata +} + +// GetEvents returns the Events Object parsed from devfile +func (d *Devfile200) GetEvents() common.DevfileEvents { + return d.Events +} + +// GetAliasedComponents returns the slice of DevfileComponent objects that each have an alias +func (d *Devfile200) GetAliasedComponents() []common.DevfileComponent { + // V2 has name required in jsonSchema + return d.Components +} diff --git a/pkg/devfile/parser/data/2.0.0/devfileJsonSchema200.go b/pkg/devfile/parser/data/2.0.0/devfileJsonSchema200.go new file mode 100644 index 00000000000..431dd0fe252 --- /dev/null +++ b/pkg/devfile/parser/data/2.0.0/devfileJsonSchema200.go @@ -0,0 +1,2730 @@ +package version200 + +const JsonSchema200 = `{ + "description": "Devfile schema.", + "properties": { + "commands": { + "description": "Predefined, ready-to-use, workspace-related commands", + "items": { + "properties": { + "composite": { + "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commands": { + "description": "The commands that comprise this composite command", + "items": { + "type": "string" + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + 
"additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "parallel": { + "description": "Indicates if the sub-commands should be executed concurrently", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "exec": { + "description": "CLI Command executed in a component container", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commandLine": { + "description": "The actual command-line string", + "type": "string" + }, + "component": { + "description": "Describes component to which given action relates", + "type": "string" + }, + "env": { + "description": "Optional list of environment variables that have to be set before running the command", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + 
}, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "workingDir": { + "description": "Working directory where the command should be executed", + "type": "string" + } + }, + "required": [ + "id", + "commandLine" + ], + "type": "object", + "additionalProperties": false + }, + "vscodeLaunch": { + "description": "Command providing the definition of a VsCode launch action", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "vscodeTask": { + "description": "Command providing the definition of a VsCode Task", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { 
+ "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "exec" + ] + }, + { + "required": [ + "vscodeTask" + ] + }, + { + "required": [ + "vscodeLaunch" + ] + }, + { + "required": [ + "composite" + ] + } + ] + }, + "type": "array" + }, + "components": { + "description": "List of the workspace components, such as editor and plugins, user-provided containers, or other types of components", + "items": { + "properties": { + "container": { + "description": "Allows adding and configuring workspace-related containers", + "properties": { + "args": { + "description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. 
Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "description": "The command to run in the dockerimage component instead of the default one provided in the image. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "endpoints": { + "items": { + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "configuration": { + "properties": { + "cookiesAuthEnabled": { + "type": "boolean" + }, + "discoverable": { + "type": "boolean" + }, + "path": { + "type": "string" + }, + "protocol": { + "description": "The is the low-level protocol of traffic coming through this endpoint. Default value is \"tcp\"", + "type": "string" + }, + "public": { + "type": "boolean" + }, + "scheme": { + "description": "The is the URL scheme to use when accessing the endpoint. Default value is \"http\"", + "type": "string" + }, + "secure": { + "type": "boolean" + }, + "type": { + "enum": [ + "ide", + "terminal", + "ide-dev" + ], + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "name": { + "type": "string" + }, + "targetPort": { + "type": "integer" + } + }, + "required": [ + "name", + "targetPort" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "env": { + "description": "Environment variables used in this container", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "memoryLimit": { + "type": "string" + }, + "mountSources": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "sourceMapping": { + "description": "Optional specification of the path in the 
container where project sources should be transferred/mounted when ` + `mountSources` + ` is ` + `true` + `. When omitted, the value of the ` + `PROJECTS_ROOT` + ` environment variable is used.", + "type": "string" + }, + "volumeMounts": { + "description": "List of volumes mounts that should be mounted is this container.", + "items": { + "description": "Volume that should be mounted to a component container", + "properties": { + "name": { + "description": "The volume mount name is the name of an existing ` + `Volume` + ` component. If no corresponding ` + `Volume` + ` component exist it is implicitly added. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", + "type": "string" + }, + "path": { + "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is ` + `/` + `.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + } + }, + "required": [ + "name", + "image" + ], + "type": "object", + "additionalProperties": false + }, + "kubernetes": { + "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "openshift": { + "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "plugin": { + "description": "Allows importing a plugin. Plugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. They are defined in either YAML files following the devfile syntax, or as ` + `DevWorkspaceTemplate` + ` Kubernetes Custom Resources", + "properties": { + "commands": { + "description": "Overrides of commands encapsulated in a plugin. 
Overriding is done using a strategic merge", + "items": { + "properties": { + "composite": { + "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commands": { + "description": "The commands that comprise this composite command", + "items": { + "type": "string" + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "parallel": { + "description": "Indicates if the sub-commands should be executed concurrently", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "exec": { + "description": "CLI Command executed in a component container", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commandLine": { + "description": "The actual command-line string", + "type": "string" + }, + "component": { + "description": "Describes component to which given action relates", + "type": "string" + }, + 
"env": { + "description": "Optional list of environment variables that have to be set before running the command", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "workingDir": { + "description": "Working directory where the command should be executed", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "vscodeLaunch": { + "description": "Command providing the definition of a VsCode launch action", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], 
+ "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "vscodeTask": { + "description": "Command providing the definition of a VsCode Task", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + } + }, + "type": "object", + 
"additionalProperties": false, + "oneOf": [ + { + "required": [ + "exec" + ] + }, + { + "required": [ + "vscodeTask" + ] + }, + { + "required": [ + "vscodeLaunch" + ] + }, + { + "required": [ + "composite" + ] + } + ] + }, + "type": "array" + }, + "components": { + "description": "Overrides of components encapsulated in a plugin. Overriding is done using a strategic merge", + "items": { + "properties": { + "container": { + "description": "Configuration overriding for a Container component", + "properties": { + "args": { + "description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "description": "The command to run in the dockerimage component instead of the default one provided in the image. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "endpoints": { + "items": { + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "configuration": { + "properties": { + "cookiesAuthEnabled": { + "type": "boolean" + }, + "discoverable": { + "type": "boolean" + }, + "path": { + "type": "string" + }, + "protocol": { + "description": "The is the low-level protocol of traffic coming through this endpoint. Default value is \"tcp\"", + "type": "string" + }, + "public": { + "type": "boolean" + }, + "scheme": { + "description": "The is the URL scheme to use when accessing the endpoint. 
Default value is \"http\"", + "type": "string" + }, + "secure": { + "type": "boolean" + }, + "type": { + "enum": [ + "ide", + "terminal", + "ide-dev" + ], + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "name": { + "type": "string" + }, + "targetPort": { + "type": "integer" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "env": { + "description": "Environment variables used in this container", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "memoryLimit": { + "type": "string" + }, + "mountSources": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "sourceMapping": { + "description": "Optional specification of the path in the container where project sources should be transferred/mounted when ` + `mountSources` + ` is ` + `true` + `. When omitted, the value of the ` + `PROJECTS_ROOT` + ` environment variable is used.", + "type": "string" + }, + "volumeMounts": { + "description": "List of volumes mounts that should be mounted is this container.", + "items": { + "description": "Volume that should be mounted to a component container", + "properties": { + "name": { + "description": "The volume mount name is the name of an existing ` + `Volume` + ` component. If no corresponding ` + `Volume` + ` component exist it is implicitly added. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", + "type": "string" + }, + "path": { + "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is ` + `/` + `.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "kubernetes": { + "description": "Configuration overriding for a Kubernetes component", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "openshift": { + "description": "Configuration overriding for an OpenShift component", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "volume": { + "description": "Configuration overriding for a Volume component", + "properties": { + "name": { + "description": "Mandatory name that allows referencing the Volume component in Container volume mounts or inside a parent", + "type": "string" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ 
+ { + "required": [ + "container" + ] + }, + { + "required": [ + "kubernetes" + ] + }, + { + "required": [ + "openshift" + ] + }, + { + "required": [ + "volume" + ] + } + ] + }, + "type": "array" + }, + "id": { + "description": "Id in a registry that contains a Devfile yaml file", + "type": "string" + }, + "kubernetes": { + "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "name": { + "description": "Optional name that allows referencing the component in commands, or inside a parent If omitted it will be infered from the location (uri or registryEntry)", + "type": "string" + }, + "registryUrl": { + "type": "string" + }, + "uri": { + "description": "Uri of a Devfile yaml file", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "id" + ] + }, + { + "required": [ + "kubernetes" + ] + } + ] + }, + "volume": { + "description": "Allows specifying the definition of a volume shared by several other components", + "properties": { + "name": { + "description": "Mandatory name that allows referencing the Volume component in Container volume mounts or inside a parent", + "type": "string" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "container" + ] + }, + { + "required": [ + "kubernetes" + ] + }, + { + "required": [ + "openshift" + ] + }, + { + "required": [ + "volume" + ] + }, + { + "required": [ + "plugin" + ] + } + ] + }, + "type": "array" + }, + "events": { + "description": "Bindings of commands to events. 
Each command is referred-to by its name.", + "properties": { + "postStart": { + "description": "Names of commands that should be executed after the workspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", + "items": { + "type": "string" + }, + "type": "array" + }, + "postStop": { + "description": "Names of commands that should be executed after stopping the workspace.", + "items": { + "type": "string" + }, + "type": "array" + }, + "preStart": { + "description": "Names of commands that should be executed before the workspace start. Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD.", + "items": { + "type": "string" + }, + "type": "array" + }, + "preStop": { + "description": "Names of commands that should be executed before stopping the workspace.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "additionalProperties": false + }, + "parent": { + "description": "Parent workspace template", + "properties": { + "commands": { + "description": "Predefined, ready-to-use, workspace-related commands", + "items": { + "properties": { + "composite": { + "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commands": { + "description": "The commands that comprise this composite command", + "items": { + "type": "string" + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": 
"boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "parallel": { + "description": "Indicates if the sub-commands should be executed concurrently", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "exec": { + "description": "CLI Command executed in a component container", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commandLine": { + "description": "The actual command-line string", + "type": "string" + }, + "component": { + "description": "Describes component to which given action relates", + "type": "string" + }, + "env": { + "description": "Optional list of environment variables that have to be set before running the command", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + 
"type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "workingDir": { + "description": "Working directory where the command should be executed", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "vscodeLaunch": { + "description": "Command providing the definition of a VsCode launch action", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "vscodeTask": { + "description": "Command providing the definition of a VsCode 
Task", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "exec" + ] + }, + { + "required": [ + "vscodeTask" + ] + }, + { + "required": [ + "vscodeLaunch" + ] + }, + { + "required": [ + "composite" + ] + } + ] + }, + "type": "array" + }, + "components": { + "description": "List of the workspace components, such as editor and plugins, user-provided containers, or other types of components", + "items": { + "properties": { + "container": { + "description": "Allows adding and configuring workspace-related containers", + "properties": { + "args": { + "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "description": "The command to run in the dockerimage component instead of the default one provided in the image. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "endpoints": { + "items": { + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "configuration": { + "properties": { + "cookiesAuthEnabled": { + "type": "boolean" + }, + "discoverable": { + "type": "boolean" + }, + "path": { + "type": "string" + }, + "protocol": { + "description": "The is the low-level protocol of traffic coming through this endpoint. Default value is \"tcp\"", + "type": "string" + }, + "public": { + "type": "boolean" + }, + "scheme": { + "description": "The is the URL scheme to use when accessing the endpoint. 
Default value is \"http\"", + "type": "string" + }, + "secure": { + "type": "boolean" + }, + "type": { + "enum": [ + "ide", + "terminal", + "ide-dev" + ], + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "name": { + "type": "string" + }, + "targetPort": { + "type": "integer" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "env": { + "description": "Environment variables used in this container", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "memoryLimit": { + "type": "string" + }, + "mountSources": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "sourceMapping": { + "description": "Optional specification of the path in the container where project sources should be transferred/mounted when ` + `mountSources` + ` is ` + `true` + `. When omitted, the value of the ` + `PROJECTS_ROOT` + ` environment variable is used.", + "type": "string" + }, + "volumeMounts": { + "description": "List of volumes mounts that should be mounted is this container.", + "items": { + "description": "Volume that should be mounted to a component container", + "properties": { + "name": { + "description": "The volume mount name is the name of an existing ` + `Volume` + ` component. If no corresponding ` + `Volume` + ` component exist it is implicitly added. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", + "type": "string" + }, + "path": { + "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is ` + `/` + `.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "kubernetes": { + "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "openshift": { + "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "plugin": { + "description": "Allows importing a plugin. Plugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. 
They are defined in either YAML files following the devfile syntax, or as ` + `DevWorkspaceTemplate` + ` Kubernetes Custom Resources", + "properties": { + "commands": { + "description": "Overrides of commands encapsulated in a plugin. Overriding is done using a strategic merge", + "items": { + "properties": { + "composite": { + "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "commands": { + "description": "The commands that comprise this composite command", + "items": { + "type": "string" + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "parallel": { + "description": "Indicates if the sub-commands should be executed concurrently", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "exec": { + "description": "CLI Command executed in a component container", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command 
attributes", + "type": "object" + }, + "commandLine": { + "description": "The actual command-line string", + "type": "string" + }, + "component": { + "description": "Describes component to which given action relates", + "type": "string" + }, + "env": { + "description": "Optional list of environment variables that have to be set before running the command", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "workingDir": { + "description": "Working directory where the command should be executed", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false + }, + "vscodeLaunch": { + "description": "Command providing the definition of a VsCode launch action", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the 
default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "vscodeTask": { + "description": "Command providing the definition of a VsCode Task", + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional map of free-form additional command attributes", + "type": "object" + }, + "group": { + "description": "Defines the group this command is part of", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "enum": [ + "build", + "run", + "test", + "debug" + ], + "type": "string" + } + }, + "required": [ + "kind" + ], + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events.", + "type": "string" + }, + "inlined": { + "description": "Inlined content of the VsCode configuration", + "type": "string" + }, + "uri": { + "description": "Location as an absolute of relative URI the VsCode configuration will be 
fetched from", + "type": "string" + } + }, + "required": [ + "id" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "exec" + ] + }, + { + "required": [ + "vscodeTask" + ] + }, + { + "required": [ + "vscodeLaunch" + ] + }, + { + "required": [ + "composite" + ] + } + ] + }, + "type": "array" + }, + "components": { + "description": "Overrides of components encapsulated in a plugin. Overriding is done using a strategic merge", + "items": { + "properties": { + "container": { + "description": "Configuration overriding for a Container component", + "properties": { + "args": { + "description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "description": "The command to run in the dockerimage component instead of the default one provided in the image. Defaults to an empty array, meaning use whatever is defined in the image.", + "items": { + "type": "string" + }, + "type": "array" + }, + "endpoints": { + "items": { + "properties": { + "attributes": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "configuration": { + "properties": { + "cookiesAuthEnabled": { + "type": "boolean" + }, + "discoverable": { + "type": "boolean" + }, + "path": { + "type": "string" + }, + "protocol": { + "description": "The is the low-level protocol of traffic coming through this endpoint. Default value is \"tcp\"", + "type": "string" + }, + "public": { + "type": "boolean" + }, + "scheme": { + "description": "The is the URL scheme to use when accessing the endpoint. 
Default value is \"http\"", + "type": "string" + }, + "secure": { + "type": "boolean" + }, + "type": { + "enum": [ + "ide", + "terminal", + "ide-dev" + ], + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "name": { + "type": "string" + }, + "targetPort": { + "type": "integer" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "env": { + "description": "Environment variables used in this container", + "items": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + }, + "image": { + "type": "string" + }, + "memoryLimit": { + "type": "string" + }, + "mountSources": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "sourceMapping": { + "description": "Optional specification of the path in the container where project sources should be transferred/mounted when ` + `mountSources` + ` is ` + `true` + `. When omitted, the value of the ` + `PROJECTS_ROOT` + ` environment variable is used.", + "type": "string" + }, + "volumeMounts": { + "description": "List of volumes mounts that should be mounted is this container.", + "items": { + "description": "Volume that should be mounted to a component container", + "properties": { + "name": { + "description": "The volume mount name is the name of an existing ` + `Volume` + ` component. If no corresponding ` + `Volume` + ` component exist it is implicitly added. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", + "type": "string" + }, + "path": { + "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is ` + `/` + `.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "kubernetes": { + "description": "Configuration overriding for a Kubernetes component", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "openshift": { + "description": "Configuration overriding for an OpenShift component", + "properties": { + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "name": { + "description": "Mandatory name that allows referencing the component in commands, or inside a parent", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ] + }, + "volume": { + "description": "Configuration overriding for a Volume component", + "properties": { + "name": { + "description": "Mandatory name that allows referencing the Volume component in Container volume mounts or inside a parent", + "type": "string" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ 
+ { + "required": [ + "container" + ] + }, + { + "required": [ + "kubernetes" + ] + }, + { + "required": [ + "openshift" + ] + }, + { + "required": [ + "volume" + ] + } + ] + }, + "type": "array" + }, + "id": { + "description": "Id in a registry that contains a Devfile yaml file", + "type": "string" + }, + "kubernetes": { + "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "name": { + "description": "Optional name that allows referencing the component in commands, or inside a parent If omitted it will be infered from the location (uri or registryEntry)", + "type": "string" + }, + "registryUrl": { + "type": "string" + }, + "uri": { + "description": "Uri of a Devfile yaml file", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "id" + ] + }, + { + "required": [ + "kubernetes" + ] + } + ] + }, + "volume": { + "description": "Allows specifying the definition of a volume shared by several other components", + "properties": { + "name": { + "description": "Mandatory name that allows referencing the Volume component in Container volume mounts or inside a parent", + "type": "string" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "container" + ] + }, + { + "required": [ + "kubernetes" + ] + }, + { + "required": [ + "openshift" + ] + }, + { + "required": [ + "volume" + ] + }, + { + "required": [ + "plugin" + ] + } + ] + }, + "type": "array" + }, + "events": { + "description": "Bindings of commands to events. 
Each command is referred-to by its name.", + "properties": { + "postStart": { + "description": "Names of commands that should be executed after the workspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", + "items": { + "type": "string" + }, + "type": "array" + }, + "postStop": { + "description": "Names of commands that should be executed after stopping the workspace.", + "items": { + "type": "string" + }, + "type": "array" + }, + "preStart": { + "description": "Names of commands that should be executed before the workspace start. Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD.", + "items": { + "type": "string" + }, + "type": "array" + }, + "preStop": { + "description": "Names of commands that should be executed before stopping the workspace.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "additionalProperties": false + }, + "id": { + "description": "Id in a registry that contains a Devfile yaml file", + "type": "string" + }, + "kubernetes": { + "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false + }, + "projects": { + "description": "Projects worked on in the workspace, containing names and sources locations", + "items": { + "properties": { + "clonePath": { + "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. 
If not specified, defaults to the project name.", + "type": "string" + }, + "git": { + "description": "Project's Git source", + "properties": { + "branch": { + "description": "The branch to check", + "type": "string" + }, + "location": { + "description": "Project's source location address. Should be URL for git and github located projects, or; file:// for zip", + "type": "string" + }, + "sparseCheckoutDir": { + "description": "Part of project to populate in the working directory.", + "type": "string" + }, + "startPoint": { + "description": "The tag or commit id to reset the checked out branch to", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "github": { + "description": "Project's GitHub source", + "properties": { + "branch": { + "description": "The branch to check", + "type": "string" + }, + "location": { + "description": "Project's source location address. Should be URL for git and github located projects, or; file:// for zip", + "type": "string" + }, + "sparseCheckoutDir": { + "description": "Part of project to populate in the working directory.", + "type": "string" + }, + "startPoint": { + "description": "The tag or commit id to reset the checked out branch to", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "name": { + "description": "Project name", + "type": "string" + }, + "zip": { + "description": "Project's Zip source", + "properties": { + "location": { + "description": "Project's source location address. 
Should be URL for git and github located projects, or; file:// for zip", + "type": "string" + }, + "sparseCheckoutDir": { + "description": "Part of project to populate in the working directory.", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "git" + ] + }, + { + "required": [ + "github" + ] + }, + { + "required": [ + "zip" + ] + } + ] + }, + "type": "array" + }, + "registryUrl": { + "type": "string" + }, + "uri": { + "description": "Uri of a Devfile yaml file", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "id" + ] + }, + { + "required": [ + "kubernetes" + ] + } + ] + }, + "projects": { + "description": "Projects worked on in the workspace, containing names and sources locations", + "items": { + "properties": { + "clonePath": { + "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", + "type": "string" + }, + "git": { + "description": "Project's Git source", + "properties": { + "branch": { + "description": "The branch to check", + "type": "string" + }, + "location": { + "description": "Project's source location address. 
Should be URL for git and github located projects, or; file:// for zip", + "type": "string" + }, + "sparseCheckoutDir": { + "description": "Part of project to populate in the working directory.", + "type": "string" + }, + "startPoint": { + "description": "The tag or commit id to reset the checked out branch to", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "github": { + "description": "Project's GitHub source", + "properties": { + "branch": { + "description": "The branch to check", + "type": "string" + }, + "location": { + "description": "Project's source location address. Should be URL for git and github located projects, or; file:// for zip", + "type": "string" + }, + "sparseCheckoutDir": { + "description": "Part of project to populate in the working directory.", + "type": "string" + }, + "startPoint": { + "description": "The tag or commit id to reset the checked out branch to", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + }, + "name": { + "description": "Project name", + "type": "string" + }, + "zip": { + "description": "Project's Zip source", + "properties": { + "location": { + "description": "Project's source location address. 
Should be URL for git and github located projects, or; file:// for zip", + "type": "string" + }, + "sparseCheckoutDir": { + "description": "Part of project to populate in the working directory.", + "type": "string" + } + }, + "type": "object", + "additionalProperties": false + } + }, + "required": [ + "name" + ], + "type": "object", + "additionalProperties": false, + "oneOf": [ + { + "required": [ + "git" + ] + }, + { + "required": [ + "github" + ] + }, + { + "required": [ + "zip" + ] + } + ] + }, + "type": "array" + }, + "metadata": { + "type": "object", + "description": "Optional metadata", + "properties": { + "version": { + "type": "string", + "description": "Optional semver-compatible version", + "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" + }, + "name": { + "type": "string", + "description": "Optional devfile name" + } + } + }, + "schemaVersion": { + "type": "string", + "description": "Devfile schema version", + "pattern": "^([2-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" + } + }, + "type": "object", + "additionalProperties": false, + "required": [ + "schemaVersion" + ] +}` diff --git a/pkg/devfile/parser/data/2.0.0/types.go b/pkg/devfile/parser/data/2.0.0/types.go new file mode 100644 index 00000000000..72d5177c462 --- /dev/null +++ b/pkg/devfile/parser/data/2.0.0/types.go @@ -0,0 +1,410 @@ +package version200 + +import "github.com/openshift/odo/pkg/devfile/parser/data/common" + +// CommandGroupType describes the kind of command group. +// +kubebuilder:validation:Enum=build;run;test;debug +type CommandGroupType string + +const ( + BuildCommandGroupType CommandGroupType = "build" + RunCommandGroupType CommandGroupType = "run" + TestCommandGroupType CommandGroupType = "test" + DebugCommandGroupType CommandGroupType = "debug" +) + +// Devfile200 Devfile schema. 
+type Devfile200 struct { + + // Predefined, ready-to-use, workspace-related commands + Commands []common.DevfileCommand `json:"commands,omitempty"` + + // List of the workspace components, such as editor and plugins, user-provided containers, or other types of components + Components []common.DevfileComponent `json:"components,omitempty"` + + // Bindings of commands to events. Each command is referred-to by its name. + Events common.DevfileEvents `json:"events,omitempty"` + + // Optional metadata + Metadata common.DevfileMetadata `json:"metadata,omitempty"` + + // Parent workspace template + Parent common.DevfileParent `json:"parent,omitempty"` + + // Projects worked on in the workspace, containing names and sources locations + Projects []common.DevfileProject `json:"projects,omitempty"` + + // Devfile schema version + SchemaVersion string `json:"schemaVersion"` +} + +// CommandsItems +type Command struct { + + // Composite command that allows executing several sub-commands either sequentially or concurrently + Composite *Composite `json:"composite,omitempty"` + + // CLI Command executed in a component container + Exec *Exec `json:"exec,omitempty"` + + // Command providing the definition of a VsCode launch action + VscodeLaunch *VscodeLaunch `json:"vscodeLaunch,omitempty"` + + // Command providing the definition of a VsCode Task + VscodeTask *VscodeTask `json:"vscodeTask,omitempty"` +} + +// ComponentsItems +type Component struct { + + // Allows adding and configuring workspace-related containers + Container *Container `json:"container,omitempty"` + + // Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production. + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` + + // Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production. + Openshift *Openshift `json:"openshift,omitempty"` + + // Allows importing a plugin. Plugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. They are defined in either YAML files following the devfile syntax, or as `DevWorkspaceTemplate` Kubernetes Custom Resources + Plugin *Plugin `json:"plugin,omitempty"` + + // Allows specifying the definition of a volume shared by several other components + Volume *Volume `json:"volume,omitempty"` +} + +// Composite Composite command that allows executing several sub-commands either sequentially or concurrently +type Composite struct { + + // Optional map of free-form additional command attributes + Attributes map[string]string `json:"attributes,omitempty"` + + // The commands that comprise this composite command + Commands []string `json:"commands,omitempty"` + + // Defines the group this command is part of + Group *Group `json:"group,omitempty"` + + // Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events. + Id string `json:"id"` + + // Optional label that provides a label for this command to be used in Editor UI menus for example + Label string `json:"label,omitempty"` + + // Indicates if the sub-commands should be executed concurrently + Parallel bool `json:"parallel,omitempty"` +} + +// Configuration +type Configuration struct { + CookiesAuthEnabled bool `json:"cookiesAuthEnabled,omitempty"` + Discoverable bool `json:"discoverable,omitempty"` + Path string `json:"path,omitempty"` + + // The is the low-level protocol of traffic coming through this endpoint. Default value is "tcp" + Protocol string `json:"protocol,omitempty"` + Public bool `json:"public,omitempty"` + + // The is the URL scheme to use when accessing the endpoint. 
Default value is "http" + Scheme string `json:"scheme,omitempty"` + Secure bool `json:"secure,omitempty"` + Type string `json:"type,omitempty"` +} + +// Container Allows adding and configuring workspace-related containers +type Container struct { + + // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. Defaults to an empty array, meaning use whatever is defined in the image. + Args []string `json:"args,omitempty"` + + // The command to run in the dockerimage component instead of the default one provided in the image. Defaults to an empty array, meaning use whatever is defined in the image. + Command []string `json:"command,omitempty"` + + Endpoints []*Endpoint `json:"endpoints,omitempty"` + + // Environment variables used in this container + Env []*Env `json:"env,omitempty"` + Image string `json:"image,omitempty"` + MemoryLimit string `json:"memoryLimit,omitempty"` + MountSources bool `json:"mountSources,omitempty"` + Name string `json:"name"` + + // Optional specification of the path in the container where project sources should be transferred/mounted when `mountSources` is `true`. When omitted, the value of the `PROJECTS_ROOT` environment variable is used. + SourceMapping string `json:"sourceMapping,omitempty"` + + // List of volumes mounts that should be mounted is this container. + VolumeMounts []*VolumeMount `json:"volumeMounts,omitempty"` +} + +// Endpoint +type Endpoint struct { + Attributes map[string]string `json:"attributes,omitempty"` + Configuration *Configuration `json:"configuration,omitempty"` + Name string `json:"name"` + TargetPort int32 `json:"targetPort"` +} + +// Env +type Env struct { + Name string `json:"name"` + Value string `json:"value"` +} + +// Events Bindings of commands to events. Each command is referred-to by its name. 
+type Events struct { + + // Names of commands that should be executed after the workspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser. + PostStart []string `json:"postStart,omitempty"` + + // Names of commands that should be executed after stopping the workspace. + PostStop []string `json:"postStop,omitempty"` + + // Names of commands that should be executed before the workspace start. Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD. + PreStart []string `json:"preStart,omitempty"` + + // Names of commands that should be executed before stopping the workspace. + PreStop []string `json:"preStop,omitempty"` +} + +// Exec CLI Command executed in a component container +type Exec struct { + + // Optional map of free-form additional command attributes + Attributes map[string]string `json:"attributes,omitempty"` + + // The actual command-line string + CommandLine string `json:"commandLine,omitempty"` + + // Describes component to which given action relates + Component string `json:"component,omitempty"` + + // Optional list of environment variables that have to be set before running the command + Env []*Env `json:"env,omitempty"` + + // Defines the group this command is part of + Group *Group `json:"group,omitempty"` + + // Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events. 
+ Id string `json:"id"` + + // Optional label that provides a label for this command to be used in Editor UI menus for example + Label string `json:"label,omitempty"` + + // Working directory where the command should be executed + WorkingDir string `json:"workingDir,omitempty"` +} + +// Git Project's Git source +type Git struct { + + // The branch to check + Branch string `json:"branch,omitempty"` + + // Project's source location address. Should be URL for git and github located projects, or; file:// for zip + Location string `json:"location,omitempty"` + + // Part of project to populate in the working directory. + SparseCheckoutDir string `json:"sparseCheckoutDir,omitempty"` + + // The tag or commit id to reset the checked out branch to + StartPoint string `json:"startPoint,omitempty"` +} + +// Github Project's GitHub source +type Github struct { + + // The branch to check + Branch string `json:"branch,omitempty"` + + // Project's source location address. Should be URL for git and github located projects, or; file:// for zip + Location string `json:"location,omitempty"` + + // Part of project to populate in the working directory. + SparseCheckoutDir string `json:"sparseCheckoutDir,omitempty"` + + // The tag or commit id to reset the checked out branch to + StartPoint string `json:"startPoint,omitempty"` +} + +// Group Defines the group this command is part of +type Group struct { + + // Identifies the default command for a given group kind + IsDefault bool `json:"isDefault,omitempty"` + + // Kind of group the command is part of + Kind CommandGroupType `json:"kind"` +} + +// Kubernetes Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production. 
+type Kubernetes struct { + + // Inlined manifest + Inlined string `json:"inlined,omitempty"` + + // Mandatory name that allows referencing the component in commands, or inside a parent + Name string `json:"name"` + + // Location in a file fetched from a uri. + Uri string `json:"uri,omitempty"` +} + +// Metadata Optional metadata +type Metadata struct { + + // Optional devfile name + Name string `json:"name,omitempty"` + + // Optional semver-compatible version + Version string `json:"version,omitempty"` +} + +// Openshift Configuration overriding for an OpenShift component +type Openshift struct { + + // Inlined manifest + Inlined string `json:"inlined,omitempty"` + + // Mandatory name that allows referencing the component in commands, or inside a parent + Name string `json:"name"` + + // Location in a file fetched from a uri. + Uri string `json:"uri,omitempty"` +} + +// Parent Parent workspace template +type Parent struct { + + // Predefined, ready-to-use, workspace-related commands + Commands []*Command `json:"commands,omitempty"` + + // List of the workspace components, such as editor and plugins, user-provided containers, or other types of components + Components []*Component `json:"components,omitempty"` + + // Bindings of commands to events. Each command is referred-to by its name. + Events *Events `json:"events,omitempty"` + + // Id in a registry that contains a Devfile yaml file + Id string `json:"id,omitempty"` + + // Reference to a Kubernetes CRD of type DevWorkspaceTemplate + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` + + // Projects worked on in the workspace, containing names and sources locations + Projects []*Project `json:"projects,omitempty"` + + RegistryUrl string `json:"registryUrl,omitempty"` + + // Uri of a Devfile yaml file + Uri string `json:"uri,omitempty"` +} + +// Plugin Allows importing a plugin. Plugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. 
They are defined in either YAML files following the devfile syntax, or as `DevWorkspaceTemplate` Kubernetes Custom Resources
+type Plugin struct {
+
+	// Overrides of commands encapsulated in a plugin. Overriding is done using a strategic merge
+	Commands []*Command `json:"commands,omitempty"`
+
+	// Overrides of components encapsulated in a plugin. Overriding is done using a strategic merge
+	Components []*Component `json:"components,omitempty"`
+
+	// Id in a registry that contains a Devfile yaml file
+	Id string `json:"id,omitempty"`
+
+	// Reference to a Kubernetes CRD of type DevWorkspaceTemplate
+	Kubernetes *Kubernetes `json:"kubernetes,omitempty"`
+
+	// Optional name that allows referencing the component in commands, or inside a parent. If omitted it will be inferred from the location (uri or registryEntry)
+	Name string `json:"name,omitempty"`
+	RegistryUrl string `json:"registryUrl,omitempty"`
+
+	// Uri of a Devfile yaml file
+	Uri string `json:"uri,omitempty"`
+}
+
+// ProjectsItems
+type Project struct {
+
+	// Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.
+ ClonePath string `json:"clonePath,omitempty"` + + // Project's Git source + Git *Git `json:"git,omitempty"` + + // Project's GitHub source + Github *Github `json:"github,omitempty"` + + // Project name + Name string `json:"name"` + + // Project's Zip source + Zip *Zip `json:"zip,omitempty"` +} + +// Volume Allows specifying the definition of a volume shared by several other components +type Volume struct { + + // Mandatory name that allows referencing the Volume component in Container volume mounts or inside a parent + Name string `json:"name"` + + // Size of the volume + Size string `json:"size,omitempty"` +} + +// VolumeMountsItems Volume that should be mounted to a component container +type VolumeMount struct { + + // The volume mount name is the name of an existing `Volume` component. If no corresponding `Volume` component exist it is implicitly added. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files. + Name string `json:"name"` + + // The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is `/`. + Path string `json:"path,omitempty"` +} + +// VscodeLaunch Command providing the definition of a VsCode launch action +type VscodeLaunch struct { + + // Optional map of free-form additional command attributes + Attributes map[string]string `json:"attributes,omitempty"` + + // Defines the group this command is part of + Group *Group `json:"group,omitempty"` + + // Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events. 
+ Id string `json:"id"` + + // Inlined content of the VsCode configuration + Inlined string `json:"inlined,omitempty"` + + // Location as an absolute of relative URI the VsCode configuration will be fetched from + Uri string `json:"uri,omitempty"` +} + +// VscodeTask Command providing the definition of a VsCode Task +type VscodeTask struct { + + // Optional map of free-form additional command attributes + Attributes map[string]string `json:"attributes,omitempty"` + + // Defines the group this command is part of + Group *Group `json:"group,omitempty"` + + // Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events. + Id string `json:"id"` + + // Inlined content of the VsCode configuration + Inlined string `json:"inlined,omitempty"` + + // Location as an absolute of relative URI the VsCode configuration will be fetched from + Uri string `json:"uri,omitempty"` +} + +// Zip Project's Zip source +type Zip struct { + + // Project's source location address. Should be URL for git and github located projects, or; file:// for zip + Location string `json:"location,omitempty"` + + // Part of project to populate in the working directory. + SparseCheckoutDir string `json:"sparseCheckoutDir,omitempty"` +} diff --git a/pkg/devfile/parser/data/common/types.go b/pkg/devfile/parser/data/common/types.go index 6c21062cb94..7fd9c55ecea 100644 --- a/pkg/devfile/parser/data/common/types.go +++ b/pkg/devfile/parser/data/common/types.go @@ -1,210 +1,326 @@ package common -// -------------- Supported devfile project types ------------ // -// DevfileProjectType store valid devfile project types -type DevfileProjectType string +// DevfileComponentType describes the type of component. 
+// Only one of the following component type may be specified +// To support some print actions +type DevfileComponentType string const ( - DevfileProjectTypeGit DevfileProjectType = "git" + ContainerComponentType DevfileComponentType = "Container" + KubernetesComponentType DevfileComponentType = "Kubernetes" + OpenshiftComponentType DevfileComponentType = "Openshift" + PluginComponentType DevfileComponentType = "Plugin" + VolumeComponentType DevfileComponentType = "Volume" + CustomComponentType DevfileComponentType = "Custom" ) -var SupportedDevfileProjectTypes = []DevfileProjectType{DevfileProjectTypeGit} - -// -------------- Supported devfile component types ------------ // -// DevfileComponentType stores valid devfile component types -type DevfileComponentType string +// CommandGroupType describes the kind of command group. +// +kubebuilder:validation:Enum=build;run;test;debug +type DevfileCommandGroupType string const ( - DevfileComponentTypeCheEditor DevfileComponentType = "cheEditor" - DevfileComponentTypeChePlugin DevfileComponentType = "chePlugin" - DevfileComponentTypeDockerimage DevfileComponentType = "dockerimage" - DevfileComponentTypeKubernetes DevfileComponentType = "kubernetes" - DevfileComponentTypeOpenshift DevfileComponentType = "openshift" + BuildCommandGroupType DevfileCommandGroupType = "build" + RunCommandGroupType DevfileCommandGroupType = "run" + TestCommandGroupType DevfileCommandGroupType = "test" + DebugCommandGroupType DevfileCommandGroupType = "debug" + // To Support V1 + InitCommandGroupType DevfileCommandGroupType = "init" ) -// -------------- Supported devfile command types ------------ // -type DevfileCommandType string +// DevfileMetadata metadata for devfile +type DevfileMetadata struct { -const ( - DevfileCommandTypeInit DevfileCommandType = "init" - DevfileCommandTypeBuild DevfileCommandType = "build" - DevfileCommandTypeRun DevfileCommandType = "run" - DevfileCommandTypeDebug DevfileCommandType = "debug" - 
DevfileCommandTypeExec DevfileCommandType = "exec" -) + // Name Optional devfile name + Name string `json:"name,omitempty"` -// ----------- Devfile Schema ---------- // -type Attributes map[string]string + // Version Optional semver-compatible version + Version string `json:"version,omitempty"` +} -type ApiVersion string +// DevfileCommand command specified in devfile +type DevfileCommand struct { + // CLI Command executed in a component container + Exec *Exec `json:"exec,omitempty"` +} -type DevfileMetadata struct { +// DevfileComponent component specified in devfile +type DevfileComponent struct { - // Workspaces created from devfile, will use it as base and append random suffix. - // It's used when name is not defined. - GenerateName *string `yaml:"generateName,omitempty" json:"generateName,omitempty"` + // Allows adding and configuring workspace-related containers + Container *Container `json:"container,omitempty"` - // The name of the devfile. Workspaces created from devfile, will inherit this - // name - Name *string `yaml:"name,omitempty" json:"name,omitempty"` -} + // Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production. + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` -// Description of the projects, containing names and sources locations -type DevfileProject struct { + // Allows importing into the workspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production. + Openshift *Openshift `json:"openshift,omitempty"` - // The path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. 
If not specified, defaults to the project name." - ClonePath *string `yaml:"clonePath,omitempty" json:"clonePath,omitempty"` + // Allows specifying the definition of a volume shared by several other components + Volume *Volume `json:"volume,omitempty"` +} + +// Configuration +type Configuration struct { + CookiesAuthEnabled bool `json:"cookiesAuthEnabled,omitempty"` + Discoverable bool `json:"discoverable,omitempty"` + Path string `json:"path,omitempty"` - // The Project Name - Name string `yaml:"name" json:"name"` + // The is the low-level protocol of traffic coming through this endpoint. Default value is "tcp" + Protocol string `json:"protocol,omitempty"` + Public bool `json:"public,omitempty"` - // Describes the project's source - type and location - Source DevfileProjectSource `yaml:"source" json:"source"` + // The is the URL scheme to use when accessing the endpoint. Default value is "http" + Scheme string `json:"scheme,omitempty"` + Secure bool `json:"secure,omitempty"` + Type string `json:"type,omitempty"` } -type DevfileProjectSource struct { - Type DevfileProjectType `yaml:"type" json:"type"` +// Container Allows adding and configuring workspace-related containers +type Container struct { - // Project's source location address. Should be URL for git and github located projects" - Location string `yaml:"location" json:"location"` + // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. Defaults to an empty array, meaning use whatever is defined in the image. + Args []string `json:"args,omitempty"` - // The name of the of the branch to check out after obtaining the source from the location. - // The branch has to already exist in the source otherwise the default branch is used. - // In case of git, this is also the name of the remote branch to push to. 
- Branch *string `yaml:"branch,omitempty" json:"branch,omitempty"` + // The command to run in the dockerimage component instead of the default one provided in the image. Defaults to an empty array, meaning use whatever is defined in the image. + Command []string `json:"command,omitempty"` - // The id of the commit to reset the checked out branch to. - // Note that this is equivalent to 'startPoint' and provided for convenience. - CommitId *string `yaml:"commitId,omitempty" json:"commitId,omitempty"` + Endpoints []Endpoint `json:"endpoints,omitempty"` - // Part of project to populate in the working directory. - SparseCheckoutDir *string `yaml:"sparseCheckoutDir,omitempty" json:"sparseCheckoutDir,omitempty"` + // Environment variables used in this container + Env []Env `json:"env,omitempty"` + Image string `json:"image,omitempty"` + MemoryLimit string `json:"memoryLimit,omitempty"` + MountSources bool `json:"mountSources,omitempty"` + Name string `json:"name"` - // The tag or commit id to reset the checked out branch to. - StartPoint *string `yaml:"startPoint,omitempty" json:"startPoint,omitempty"` + // Optional specification of the path in the container where project sources should be transferred/mounted when `mountSources` is `true`. When omitted, the value of the `PROJECTS_ROOT` environment variable is used. + SourceMapping string `json:"sourceMapping,omitempty"` - // The name of the tag to reset the checked out branch to. - // Note that this is equivalent to 'startPoint' and provided for convenience. - Tag *string `yaml:"tag,omitempty" json:"tag,omitempty"` + // List of volumes mounts that should be mounted is this container. 
+ VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"` } -type DevfileCommand struct { +// Endpoint +type Endpoint struct { + Attributes map[string]string `json:"attributes,omitempty"` + Configuration *Configuration `json:"configuration,omitempty"` + Name string `json:"name"` + TargetPort int32 `json:"targetPort"` +} - // List of the actions of given command. Now the only one command must be - // specified in list but there are plans to implement supporting multiple actions - // commands. - Actions []DevfileCommandAction `yaml:"actions" json:"actions"` +// Env +type Env struct { + Name string `json:"name"` + Value string `json:"value"` +} - // Additional command attributes - Attributes Attributes `yaml:"attributes,omitempty" json:"attributes,omitempty"` +// Events Bindings of commands to events. Each command is referred-to by its name. +type DevfileEvents struct { - // Describes the name of the command. Should be unique per commands set. - Name string `yaml:"name"` + // Names of commands that should be executed after the workspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser. + PostStart []string `json:"postStart,omitempty"` - // Preview url - PreviewUrl DevfileCommandPreviewUrl `yaml:"previewUrl,omitempty" json:"previewUrl,omitempty"` -} + // Names of commands that should be executed after stopping the workspace. + PostStop []string `json:"postStop,omitempty"` -type DevfileCommandPreviewUrl struct { - Port *int32 `yaml:"port,omitempty" json:"port,omitempty"` - Path *string `yaml:"path,omitempty" json:"path,omitempty"` + // Names of commands that should be executed before the workspace start. Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD. 
+ PreStart []string `json:"preStart,omitempty"` + + // Names of commands that should be executed before stopping the workspace. + PreStop []string `json:"preStop,omitempty"` } -type DevfileCommandAction struct { +// Exec CLI Command executed in a component container +type Exec struct { + + // Optional map of free-form additional command attributes + Attributes map[string]string `json:"attributes,omitempty"` - // The actual action command-line string - Command *string `yaml:"command,omitempty" json:"command,omitempty"` + // The actual command-line string + CommandLine string `json:"commandLine,omitempty"` // Describes component to which given action relates - Component *string `yaml:"component,omitempty" json:"component,omitempty"` + Component string `json:"component,omitempty"` - // the path relative to the location of the devfile to the configuration file - // defining one or more actions in the editor-specific format - Reference *string `yaml:"reference,omitempty" json:"reference,omitempty"` + // Optional list of environment variables that have to be set before running the command + Env []Env `json:"env,omitempty"` - // The content of the referenced configuration file that defines one or more - // actions in the editor-specific format - ReferenceContent *string `yaml:"referenceContent,omitempty" json:"referenceContent,omitempty"` + // Defines the group this command is part of + Group *Group `json:"group,omitempty"` - // Describes action type - Type *DevfileCommandType `yaml:"type,omitempty" json:"type,omitempty"` + // Mandatory identifier that allows referencing this command in composite commands, or from a parent, or in events. 
+ Id string `json:"id"` + + // Optional label that provides a label for this command to be used in Editor UI menus for example + Label string `json:"label,omitempty"` // Working directory where the command should be executed - Workdir *string `yaml:"workdir,omitempty" json:"workdir,omitempty"` + WorkingDir string `json:"workingDir,omitempty"` } -type DevfileComponent struct { +// Git Project's Git source +type Git struct { + + // The branch to check + Branch string `json:"branch,omitempty"` + + // Project's source location address. Should be URL for git and github located projects, or; file:// for zip + Location string `json:"location,omitempty"` - // The name using which other places of this devfile (like commands) can refer to - // this component. This attribute is optional but must be unique in the devfile if - // specified. - Alias *string `yaml:"alias,omitempty" json:"alias,omitempty"` + // Part of project to populate in the working directory. + SparseCheckoutDir string `json:"sparseCheckoutDir,omitempty"` + + // The tag or commit id to reset the checked out branch to + StartPoint string `json:"startPoint,omitempty"` +} + +// Github Project's GitHub source +type Github struct { + + // The branch to check + Branch string `json:"branch,omitempty"` + + // Project's source location address. Should be URL for git and github located projects, or; file:// for zip + Location string `json:"location,omitempty"` + + // Part of project to populate in the working directory. + SparseCheckoutDir string `json:"sparseCheckoutDir,omitempty"` - // Describes whether projects sources should be mount to the component. - // `CHE_PROJECTS_ROOT` environment variable should contains a path where projects - // sources are mount - MountSources bool `yaml:"mountSources,omitempty" json:"mountSources,omitempty"` + // The tag or commit id to reset the checked out branch to + StartPoint string `json:"startPoint,omitempty"` +} - // Describes type of the component, e.g. 
whether it is an plugin or editor or - // other type - Type DevfileComponentType `yaml:"type" json:"type"` +// Group Defines the group this command is part of +type Group struct { - // for type ChePlugin - DevfileComponentChePlugin `yaml:",inline" json:",inline"` + // Identifies the default command for a given group kind + IsDefault bool `json:"isDefault,omitempty"` - // for type=dockerfile - DevfileComponentDockerimage `yaml:",inline" json:",inline"` + // Kind of group the command is part of + Kind DevfileCommandGroupType `json:"kind"` } -type DevfileComponentChePlugin struct { - Id *string `yaml:"id,omitempty" json:"id,omitempty"` - Reference *string `yaml:"reference,omitempty" json:"reference,omitempty"` - RegistryUrl *string `yaml:"registryUrl,omitempty" json:"registryUrl,omitempty"` +// Kubernetes Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production. +type Kubernetes struct { + + // Inlined manifest + Inlined string `json:"inlined,omitempty"` + + // Mandatory name that allows referencing the component in commands, or inside a parent + Name string `json:"name"` + + // Location in a file fetched from a uri. + Uri string `json:"uri,omitempty"` } -type DevfileComponentCheEditor struct { - Id *string `yaml:"id,omitempty" json:"id,omitempty"` - Reference *string `yaml:"reference,omitempty" json:"reference,omitempty"` - RegistryUrl *string `yaml:"registryUrl,omitempty" json:"registryUrl,omitempty"` +// Openshift Configuration overriding for an OpenShift component +type Openshift struct { + + // Inlined manifest + Inlined string `json:"inlined,omitempty"` + + // Mandatory name that allows referencing the component in commands, or inside a parent + Name string `json:"name"` + + // Location in a file fetched from a uri. 
+ Uri string `json:"uri,omitempty"` } -type DevfileComponentOpenshift struct { - Reference *string `yaml:"reference,omitempty" json:"reference,omitempty"` - ReferenceContent *string `yaml:"referenceContent,omitempty" json:"referenceContent,omitempty"` - Selector *string `yaml:"selector,omitempty" json:"selector,omitempty"` - EntryPoints *string `yaml:"entryPoints,omitempty" json:"entryPoints,omitempty"` - MemoryLimit *string `yaml:"memoryLimit,omitempty" json:"memoryLimit,omitempty"` +// DevfileParent Parent workspace template +type DevfileParent struct { + + // Predefined, ready-to-use, workspace-related commands + Commands []*DevfileCommand `json:"commands,omitempty"` + + // List of the workspace components, such as editor and plugins, user-provided containers, or other types of components + Components []*DevfileComponent `json:"components,omitempty"` + + // Bindings of commands to events. Each command is referred-to by its name. + Events *DevfileEvents `json:"events,omitempty"` + + // Id in a registry that contains a Devfile yaml file + Id string `json:"id,omitempty"` + + // Reference to a Kubernetes CRD of type DevWorkspaceTemplate + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` + + // Projects worked on in the workspace, containing names and sources locations + Projects []*DevfileProject `json:"projects,omitempty"` + + RegistryUrl string `json:"registryUrl,omitempty"` + + // Uri of a Devfile yaml file + Uri string `json:"uri,omitempty"` } -type DevfileComponentKubernetes struct { - Reference *string `yaml:"reference,omitempty" json:"reference,omitempty"` - ReferenceContent *string `yaml:"referenceContent,omitempty" json:"referenceContent,omitempty"` - Selector *string `yaml:"selector,omitempty" json:"selector,omitempty"` - EntryPoints *string `yaml:"entryPoints,omitempty" json:"entryPoints,omitempty"` - MemoryLimit *string `yaml:"memoryLimit,omitempty" json:"memoryLimit,omitempty"` +// Plugin Allows importing a plugin. 
Plugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. They are defined in either YAML files following the devfile syntax, or as `DevWorkspaceTemplate` Kubernetes Custom Resources
+type Plugin struct {
+
+	// Overrides of commands encapsulated in a plugin. Overriding is done using a strategic merge
+	Commands []*DevfileCommand `json:"commands,omitempty"`
+
+	// Overrides of components encapsulated in a plugin. Overriding is done using a strategic merge
+	Components []*DevfileComponent `json:"components,omitempty"`
+
+	// Id in a registry that contains a Devfile yaml file
+	Id string `json:"id,omitempty"`
+
+	// Reference to a Kubernetes CRD of type DevWorkspaceTemplate
+	Kubernetes *Kubernetes `json:"kubernetes,omitempty"`
+
+	// Optional name that allows referencing the component in commands, or inside a parent. If omitted it will be inferred from the location (uri or registryEntry)
+	Name string `json:"name,omitempty"`
+	RegistryUrl string `json:"registryUrl,omitempty"`
+
+	// Uri of a Devfile yaml file
+	Uri string `json:"uri,omitempty"`
 }

-type DevfileComponentDockerimage struct {
-	Image *string `yaml:"image,omitempty" json:"image,omitempty"`
-	MemoryLimit *string `yaml:"memoryLimit,omitempty" json:"memoryLimit,omitempty"`
-	Command []string `yaml:"command,omitempty" json:"command,omitempty"`
-	Args []string `yaml:"args,omitempty" json:"args,omitempty"`
-	Volumes []DockerimageVolume `yaml:"volumes,omitempty" json:"volumes,omitempty"`
-	Env []DockerimageEnv `yaml:"env,omitempty" json:"env,omitempty"`
-	Endpoints []DockerimageEndpoint `yaml:"endpoints,omitempty" json:"endpoints,omitempty"`
+// DevfileProject project defined in devfile
+type DevfileProject struct {
+
+	// Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes).
The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. + ClonePath string `json:"clonePath,omitempty"` + + // Project's Git source + Git *Git `json:"git,omitempty"` + + // Project's GitHub source + Github *Github `json:"github,omitempty"` + + // Project name + Name string `json:"name"` + + // Project's Zip source + Zip *Zip `json:"zip,omitempty"` } -type DockerimageVolume struct { - Name *string `yaml:"name,omitempty" json:"name,omitempty"` - ContainerPath *string `yaml:"containerPath,omitempty" json:"containerPath,omitempty"` +// Volume Allows specifying the definition of a volume shared by several other components +type Volume struct { + + // Mandatory name that allows referencing the Volume component in Container volume mounts or inside a parent + Name string `json:"name"` + + // Size of the volume + Size string `json:"size,omitempty"` } -type DockerimageEnv struct { - Name *string `yaml:"name,omitempty" json:"name,omitempty"` - Value *string `yaml:"value,omitempty" json:"value,omitempty"` +// VolumeMountsItems Volume that should be mounted to a component container +type VolumeMount struct { + + // The volume mount name is the name of an existing `Volume` component. If no corresponding `Volume` component exist it is implicitly added. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files. + Name string `json:"name"` + + // The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is `/`. + Path string `json:"path,omitempty"` } -type DockerimageEndpoint struct { - Name *string `yaml:"name,omitempty" json:"name,omitempty"` - Port *int32 `yaml:"port,omitempty" json:"port,omitempty"` +// Zip Project's Zip source +type Zip struct { + + // Project's source location address. 
Should be URL for git and github located projects, or; file:// for zip + Location string `json:"location,omitempty"` + + // Part of project to populate in the working directory. + SparseCheckoutDir string `json:"sparseCheckoutDir,omitempty"` } diff --git a/pkg/devfile/parser/data/interface.go b/pkg/devfile/parser/data/interface.go index 8b6f8221a65..314bbfa603f 100644 --- a/pkg/devfile/parser/data/interface.go +++ b/pkg/devfile/parser/data/interface.go @@ -6,6 +6,9 @@ import ( // DevfileData is an interface that defines functions for Devfile data operations type DevfileData interface { + GetMetadata() common.DevfileMetadata + GetParent() common.DevfileParent + GetEvents() common.DevfileEvents GetComponents() []common.DevfileComponent GetAliasedComponents() []common.DevfileComponent GetProjects() []common.DevfileProject diff --git a/pkg/devfile/parser/data/versions.go b/pkg/devfile/parser/data/versions.go index 8b025f6e410..10615f285e9 100644 --- a/pkg/devfile/parser/data/versions.go +++ b/pkg/devfile/parser/data/versions.go @@ -4,6 +4,7 @@ import ( "reflect" v100 "github.com/openshift/odo/pkg/devfile/parser/data/1.0.0" + v200 "github.com/openshift/odo/pkg/devfile/parser/data/2.0.0" ) // SupportedApiVersions stores the supported devfile API versions @@ -12,10 +13,11 @@ type supportedApiVersion string // Supported devfile API versions in odo const ( apiVersion100 supportedApiVersion = "1.0.0" + apiVersion200 supportedApiVersion = "2.0.0" ) // List of supported devfile API versions -var supportedApiVersionsList = []supportedApiVersion{apiVersion100} +var supportedApiVersionsList = []supportedApiVersion{apiVersion100, apiVersion200} // ------------- Init functions ------------- // @@ -26,6 +28,8 @@ var apiVersionToDevfileStruct map[supportedApiVersion]reflect.Type func init() { apiVersionToDevfileStruct = make(map[supportedApiVersion]reflect.Type) apiVersionToDevfileStruct[apiVersion100] = reflect.TypeOf(v100.Devfile100{}) + apiVersionToDevfileStruct[apiVersion200] = 
reflect.TypeOf(v200.Devfile200{}) + } // Map to store mappings between supported devfile API versions and respective devfile JSON schemas @@ -35,4 +39,6 @@ var devfileApiVersionToJSONSchema map[supportedApiVersion]string func init() { devfileApiVersionToJSONSchema = make(map[supportedApiVersion]string) devfileApiVersionToJSONSchema[apiVersion100] = v100.JsonSchema100 + devfileApiVersionToJSONSchema[apiVersion200] = v200.JsonSchema200 + } diff --git a/pkg/devfile/parser/writer_test.go b/pkg/devfile/parser/writer_test.go index 5368660a3ef..31165ae415d 100644 --- a/pkg/devfile/parser/writer_test.go +++ b/pkg/devfile/parser/writer_test.go @@ -5,7 +5,6 @@ import ( devfileCtx "github.com/openshift/odo/pkg/devfile/parser/context" v100 "github.com/openshift/odo/pkg/devfile/parser/data/1.0.0" - "github.com/openshift/odo/pkg/devfile/parser/data/common" "github.com/openshift/odo/pkg/testingutil/filesystem" ) @@ -23,9 +22,9 @@ func TestWriteJsonDevfile(t *testing.T) { devfileObj := DevfileObj{ Ctx: devfileCtx.NewDevfileCtx(devfileTempPath), Data: &v100.Devfile100{ - ApiVersion: common.ApiVersion(apiVersion), - Metadata: common.DevfileMetadata{ - Name: &testName, + ApiVersion: v100.ApiVersion(apiVersion), + Metadata: v100.Metadata{ + Name: testName, }, }, } @@ -51,9 +50,9 @@ func TestWriteJsonDevfile(t *testing.T) { devfileObj := DevfileObj{ Ctx: devfileCtx.NewDevfileCtx(devfileTempPath), Data: &v100.Devfile100{ - ApiVersion: common.ApiVersion(apiVersion), - Metadata: common.DevfileMetadata{ - Name: &testName, + ApiVersion: v100.ApiVersion(apiVersion), + Metadata: v100.Metadata{ + Name: testName, }, }, } diff --git a/pkg/devfile/validate/components.go b/pkg/devfile/validate/components.go index 3afe6f47e69..1471c3347ab 100644 --- a/pkg/devfile/validate/components.go +++ b/pkg/devfile/validate/components.go @@ -2,13 +2,14 @@ package validate import ( "fmt" + "github.com/openshift/odo/pkg/devfile/parser/data/common" ) // Errors var ( - ErrorNoComponents = "no components present" - 
ErrorNoDockerImageComponent = fmt.Sprintf("odo requires atleast one component of type '%s' in devfile", common.DevfileComponentTypeDockerimage) + ErrorNoComponents = "no components present" + ErrorNoContainerComponent = fmt.Sprintf("odo requires atleast one component of type '%s' in devfile", common.ContainerComponentType) ) // ValidateComponents validates all the devfile components @@ -19,17 +20,17 @@ func ValidateComponents(components []common.DevfileComponent) error { return fmt.Errorf(ErrorNoComponents) } - // Check wether component of type dockerimage is present - isDockerImageComponentPresent := false + // Check if component of type container is present + isContainerComponentPresent := false for _, component := range components { - if component.Type == common.DevfileComponentTypeDockerimage { - isDockerImageComponentPresent = true + if component.Container != nil { + isContainerComponentPresent = true break } } - if !isDockerImageComponentPresent { - return fmt.Errorf(ErrorNoDockerImageComponent) + if !isContainerComponentPresent { + return fmt.Errorf(ErrorNoContainerComponent) } // Successful diff --git a/pkg/devfile/validate/components_test.go b/pkg/devfile/validate/components_test.go index 9e48ea58042..dee7cc45e5f 100644 --- a/pkg/devfile/validate/components_test.go +++ b/pkg/devfile/validate/components_test.go @@ -23,11 +23,13 @@ func TestValidateComponents(t *testing.T) { } }) - t.Run("DockerImage type of component present", func(t *testing.T) { + t.Run("Container type of component present", func(t *testing.T) { components := []common.DevfileComponent{ { - Type: common.DevfileComponentTypeDockerimage, + Container: &common.Container{ + Name: "container", + }, }, } @@ -37,23 +39,4 @@ func TestValidateComponents(t *testing.T) { t.Errorf("Not expecting an error: '%v'", got) } }) - - t.Run("DockerImage type of component NOT present", func(t *testing.T) { - - components := []common.DevfileComponent{ - { - Type: common.DevfileComponentTypeCheEditor, - }, - { - 
Type: common.DevfileComponentTypeChePlugin, - }, - } - - got := ValidateComponents(components) - want := fmt.Errorf(ErrorNoDockerImageComponent) - - if !reflect.DeepEqual(got, want) { - t.Errorf("Incorrect error; want: '%v', got: '%v'", want, got) - } - }) } diff --git a/pkg/devfile/validate/validate.go b/pkg/devfile/validate/validate.go index 0feb47f6c72..e976bdd3199 100644 --- a/pkg/devfile/validate/validate.go +++ b/pkg/devfile/validate/validate.go @@ -3,25 +3,36 @@ package validate import ( "reflect" - v100 "github.com/openshift/odo/pkg/devfile/parser/data/1.0.0" + "github.com/openshift/odo/pkg/devfile/parser/data/common" "k8s.io/klog" + + v100 "github.com/openshift/odo/pkg/devfile/parser/data/1.0.0" + v200 "github.com/openshift/odo/pkg/devfile/parser/data/2.0.0" ) // ValidateDevfileData validates whether sections of devfile are odo compatible func ValidateDevfileData(data interface{}) error { + var components []common.DevfileComponent typeData := reflect.TypeOf(data) + if typeData == reflect.TypeOf(&v100.Devfile100{}) { d := data.(*v100.Devfile100) + components = d.GetComponents() + } - // Validate Components - if err := ValidateComponents(d.Components); err != nil { - return err - } + if typeData == reflect.TypeOf(&v200.Devfile200{}) { + d := data.(*v200.Devfile200) + components = d.GetComponents() + } - // Successful - klog.V(4).Info("Successfully validated devfile sections") - return nil + // Validate Components + if err := ValidateComponents(components); err != nil { + return err } + + // Successful + klog.V(4).Info("Successfully validated devfile sections") return nil + } diff --git a/pkg/envinfo/envinfo.go b/pkg/envinfo/envinfo.go index 8409d209878..c6aba30c130 100644 --- a/pkg/envinfo/envinfo.go +++ b/pkg/envinfo/envinfo.go @@ -14,25 +14,29 @@ import ( "github.com/openshift/odo/pkg/util" ) -const ( - envInfoEnvName = "ENVINFO" - envInfoFileName = "env.yaml" -) - // ComponentSettings holds all component related information type ComponentSettings struct { 
- Name string `yaml:"Name,omitempty"` - Namespace string `yaml:"Namespace,omitempty"` - URL *[]EnvInfoURL `yaml:"Url,omitempty"` + Name string `yaml:"Name,omitempty"` + Namespace string `yaml:"Namespace,omitempty"` + URL *[]EnvInfoURL `yaml:"Url,omitempty"` + PushCommand *EnvInfoPushCommand `yaml:"PushCommand,omitempty"` + + // DebugPort controls the port used by the pod to run the debugging agent on + DebugPort *int `yaml:"DebugPort,omitempty"` } // URLKind is an enum to indicate the type of the URL i.e ingress/route type URLKind string const ( - INGRESS URLKind = "ingress" - ROUTE URLKind = "route" - DOCKER URLKind = "docker" + DOCKER URLKind = "docker" + INGRESS URLKind = "ingress" + ROUTE URLKind = "route" + envInfoEnvName = "ENVINFO" + envInfoFileName = "env.yaml" + + // DefaultDebugPort is the default port used for debugging on remote pod + DefaultDebugPort = 5858 ) // EnvInfoURL holds URL related information @@ -44,7 +48,7 @@ type EnvInfoURL struct { // Indicates if the URL should be a secure https one Secure bool `yaml:"Secure,omitempty"` // Cluster host - Host string `yaml:"host,omitempty"` + Host string `yaml:"Host,omitempty"` // TLS secret name to create ingress to provide a secure URL TLSSecret string `yaml:"TLSSecret,omitempty"` // Exposed port number for docker container, required for local scenarios @@ -53,7 +57,14 @@ type EnvInfoURL struct { Kind URLKind `yaml:"Kind,omitempty"` } -// EnvInfo holds all the env specific infomation relavent to a specific Component. +// EnvInfoPushCommand holds the devfile push commands for the component +type EnvInfoPushCommand struct { + Init string `yaml:"Init,omitempty"` + Build string `yaml:"Build,omitempty"` + Run string `yaml:"Run,omitempty"` +} + +// EnvInfo holds all the env specific information relevant to a specific Component. 
type EnvInfo struct { componentSettings ComponentSettings `yaml:"ComponentSettings,omitempty"` } @@ -161,6 +172,9 @@ func (esi *EnvSpecificInfo) SetConfiguration(parameter string, value interface{} } else { esi.componentSettings.URL = &[]EnvInfoURL{urlValue} } + case "push": + pushCommandValue := value.(EnvInfoPushCommand) + esi.componentSettings.PushCommand = &pushCommandValue } return esi.writeToFile() @@ -266,6 +280,14 @@ func (ei *EnvInfo) GetURL() []EnvInfoURL { return *ei.componentSettings.URL } +// GetPushCommand returns the EnvInfoPushCommand, returns default if nil +func (ei *EnvInfo) GetPushCommand() EnvInfoPushCommand { + if ei.componentSettings.PushCommand == nil { + return EnvInfoPushCommand{} + } + return *ei.componentSettings.PushCommand +} + // GetName returns the component name func (ei *EnvInfo) GetName() string { if ei.componentSettings.Name == "" { @@ -274,6 +296,14 @@ func (ei *EnvInfo) GetName() string { return ei.componentSettings.Name } +// GetDebugPort returns the DebugPort, returns default if nil +func (ei *EnvInfo) GetDebugPort() int { + if ei.componentSettings.DebugPort == nil { + return DefaultDebugPort + } + return *ei.componentSettings.DebugPort +} + // GetNamespace returns component namespace func (ei *EnvInfo) GetNamespace() string { if ei.componentSettings.Namespace == "" { @@ -287,16 +317,21 @@ const ( Create = "CREATE" // CreateDescription is the description of Create parameter CreateDescription = "Create parameter is the action to write devfile metadata to env.yaml" - // URL + // URL parameter URL = "URL" // URLDescription is the description of URL URLDescription = "URL to access the component" + // Push parameter + Push = "PUSH" + // PushDescription is the description of Push + PushDescription = "Push parameter is the action to write devfile commands to env.yaml" ) var ( supportedLocalParameterDescriptions = map[string]string{ Create: CreateDescription, URL: URLDescription, + Push: PushDescription, } lowerCaseLocalParameters = 
util.GetLowerCaseParameters(GetLocallySupportedParameters()) diff --git a/pkg/envinfo/envinfo_test.go b/pkg/envinfo/envinfo_test.go index 7d138c98598..7831a220da2 100644 --- a/pkg/envinfo/envinfo_test.go +++ b/pkg/envinfo/envinfo_test.go @@ -24,13 +24,15 @@ func TestSetEnvInfo(t *testing.T) { testURL := EnvInfoURL{Name: "testURL", Host: "1.2.3.4.nip.io", TLSSecret: "testTLSSecret"} invalidParam := "invalidParameter" testCreate := ComponentSettings{Name: "componentName", Namespace: "namespace"} + testPush := EnvInfoPushCommand{Init: "myinit", Build: "myBuild", Run: "myRun"} tests := []struct { - name string - parameter string - value interface{} - existingEnvInfo EnvInfo - expectError bool + name string + parameter string + value interface{} + existingEnvInfo EnvInfo + checkConfigSetting []string + expectError bool }{ { name: fmt.Sprintf("Case 1: %s to test", URL), @@ -39,7 +41,8 @@ func TestSetEnvInfo(t *testing.T) { existingEnvInfo: EnvInfo{ componentSettings: ComponentSettings{}, }, - expectError: false, + checkConfigSetting: []string{"URL"}, + expectError: false, }, { name: fmt.Sprintf("Case 2: %s to test", invalidParam), @@ -48,7 +51,8 @@ func TestSetEnvInfo(t *testing.T) { existingEnvInfo: EnvInfo{ componentSettings: ComponentSettings{}, }, - expectError: true, + checkConfigSetting: []string{"URL"}, + expectError: true, }, { name: "Case 3: Test fields setup from create parameter", @@ -57,7 +61,18 @@ func TestSetEnvInfo(t *testing.T) { existingEnvInfo: EnvInfo{ componentSettings: ComponentSettings{}, }, - expectError: false, + checkConfigSetting: []string{"Name", "Namespace"}, + expectError: false, + }, + { + name: "Case 4: Test fields setup from push parameter", + parameter: Push, + value: testPush, + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{}, + }, + checkConfigSetting: []string{"PushCommand"}, + expectError: false, }, } for _, tt := range tests { @@ -68,29 +83,15 @@ func TestSetEnvInfo(t *testing.T) { } esi.EnvInfo = 
tt.existingEnvInfo err = esi.SetConfiguration(tt.parameter, tt.value) - if err == nil && tt.expectError { - t.Errorf("expected error for SetConfiguration with %s", tt.parameter) - } else if !tt.expectError { - if err != nil { - t.Error(err) - } - + if !tt.expectError && err != nil { + t.Errorf("unexpected error for SetConfiguration with %s: %v", tt.parameter, err) + } else if !tt.expectError && err == nil { isSet := false - - if tt.parameter == Create { - parameters := []string{"Name", "Namespace"} - for _, parameter := range parameters { - isSet = esi.IsSet(parameter) - if !isSet { - t.Errorf("the '%v' is not set", parameter) - } + for _, configSetting := range tt.checkConfigSetting { + isSet = esi.IsSet(configSetting) + if !isSet { + t.Errorf("the setting '%s' is not set", configSetting) } - } else { - isSet = esi.IsSet(tt.parameter) - } - - if !isSet { - t.Errorf("the '%v' is not set", tt.parameter) } } @@ -234,8 +235,149 @@ func TestDeleteURLFromMultipleURLs(t *testing.T) { } +func TestGetPushCommand(t *testing.T) { + tempEnvFile, err := ioutil.TempFile("", "odoenvinfo") + if err != nil { + t.Fatal(err) + } + defer tempEnvFile.Close() + os.Setenv(envInfoEnvName, tempEnvFile.Name()) + + tests := []struct { + name string + existingEnvInfo EnvInfo + wantPushCommand EnvInfoPushCommand + }{ + { + name: "Case 1: Init, Build & Run commands present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Init: "myinit", + Build: "mybuild", + Run: "myrun", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Init: "myinit", + Run: "myrun", + Build: "mybuild", + }, + }, + { + name: "Case 2: Build & Run commands present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Build: "mybuild", + Run: "myrun", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Run: "myrun", + Build: "mybuild", + }, + }, + { + name: "Case 3: Build & Init commands present", + 
existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Build: "mybuild", + Init: "myinit", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Init: "myinit", + Build: "mybuild", + }, + }, + { + name: "Case 4: Init & Run commands present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Init: "myinit", + Run: "myrun", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Run: "myrun", + Init: "myinit", + }, + }, + { + name: "Case 5: Build command present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Build: "mybuild", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Build: "mybuild", + }, + }, + { + name: "Case 6: Run command present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Run: "myrun", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Run: "myrun", + }, + }, + { + name: "Case 7: Init command present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{ + Init: "myinit", + }, + }, + }, + wantPushCommand: EnvInfoPushCommand{ + Init: "myinit", + }, + }, + { + name: "Case 8: No commands present", + existingEnvInfo: EnvInfo{ + componentSettings: ComponentSettings{ + PushCommand: &EnvInfoPushCommand{}, + }, + }, + wantPushCommand: EnvInfoPushCommand{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + esi, err := NewEnvSpecificInfo("") + if err != nil { + t.Error(err) + } + esi.EnvInfo = tt.existingEnvInfo + pushCommand := esi.GetPushCommand() + + if !reflect.DeepEqual(tt.wantPushCommand, pushCommand) { + t.Errorf("TestGetPushCommand error: push commands mismatch, expected: %v got: %v", tt.wantPushCommand, pushCommand) + } + }) + } + +} + func TestLowerCaseParameterForLocalParameters(t *testing.T) { - expected := map[string]bool{"create": 
true, "url": true} + expected := map[string]bool{"create": true, "push": true, "url": true} actual := util.GetLowerCaseParameters(GetLocallySupportedParameters()) if !reflect.DeepEqual(expected, actual) { t.Errorf("expected '%v', got '%v'", expected, actual) diff --git a/pkg/exec/devfile.go b/pkg/exec/devfile.go index 9116e8b76a4..8694c3b14cb 100644 --- a/pkg/exec/devfile.go +++ b/pkg/exec/devfile.go @@ -2,45 +2,60 @@ package exec import ( "fmt" + "io" adaptersCommon "github.com/openshift/odo/pkg/devfile/adapters/common" "github.com/openshift/odo/pkg/devfile/parser/data/common" "github.com/openshift/odo/pkg/log" + "github.com/openshift/odo/pkg/machineoutput" "github.com/pkg/errors" ) // ExecuteDevfileBuildAction executes the devfile build command action -func ExecuteDevfileBuildAction(client ExecClient, action common.DevfileCommandAction, commandName string, compInfo adaptersCommon.ComponentInfo, show bool) error { +func ExecuteDevfileBuildAction(client ExecClient, exec common.Exec, commandName string, compInfo adaptersCommon.ComponentInfo, show bool, machineEventLogger machineoutput.MachineEventLoggingClient) error { var s *log.Status // Change to the workdir and execute the command var cmdArr []string - if action.Workdir != nil { + if exec.WorkingDir != "" { // since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd" - cmdArr = []string{adaptersCommon.ShellExecutable, "-c", "cd " + *action.Workdir + " && " + *action.Command} + cmdArr = []string{adaptersCommon.ShellExecutable, "-c", "cd " + exec.WorkingDir + " && " + exec.CommandLine} } else { - cmdArr = []string{adaptersCommon.ShellExecutable, "-c", *action.Command} + cmdArr = []string{adaptersCommon.ShellExecutable, "-c", exec.CommandLine} } if show { - s = log.SpinnerNoSpin("Executing " + commandName + " command " + fmt.Sprintf("%q", *action.Command)) + s = log.SpinnerNoSpin("Executing " + commandName + " command " + fmt.Sprintf("%q", 
exec.CommandLine)) } else { - s = log.Spinnerf("Executing %s command %q", commandName, *action.Command) + s = log.Spinnerf("Executing %s command %q", commandName, exec.CommandLine) } defer s.End(false) - err := ExecuteCommand(client, compInfo, cmdArr, show) + // Emit DevFileCommandExecutionBegin JSON event (if machine output logging is enabled) + machineEventLogger.DevFileCommandExecutionBegin(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow()) + + // Capture container text and log to the screen as JSON events (machine output only) + stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := machineEventLogger.CreateContainerOutputWriter() + + err := ExecuteCommand(client, compInfo, cmdArr, show, stdoutWriter, stderrWriter) + + // Close the writers and wait for an acknowledgement that the reader loop has exited (to ensure we get ALL container output) + closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel) + + // Emit close event + machineEventLogger.DevFileCommandExecutionComplete(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow(), err) if err != nil { return errors.Wrapf(err, "unable to execute the build command") } + s.End(true) return nil } // ExecuteDevfileRunAction executes the devfile run command action using the supervisord devrun program -func ExecuteDevfileRunAction(client ExecClient, action common.DevfileCommandAction, commandName string, compInfo adaptersCommon.ComponentInfo, show bool) error { +func ExecuteDevfileRunAction(client ExecClient, exec common.Exec, commandName string, compInfo adaptersCommon.ComponentInfo, show bool, machineEventLogger machineoutput.MachineEventLoggingClient) error { var s *log.Status // Exec the supervisord ctl stop and start for the devrun program @@ -56,15 +71,28 @@ func ExecuteDevfileRunAction(client ExecClient, action common.DevfileCommandActi }, } - s = log.Spinnerf("Executing %s 
command %q", commandName, *action.Command) + s = log.Spinnerf("Executing %s command %q", commandName, exec.CommandLine) defer s.End(false) for _, devRunExec := range devRunExecs { - err := ExecuteCommand(client, compInfo, devRunExec.command, show) + // Emit DevFileCommandExecutionBegin JSON event (if machine output logging is enabled) + machineEventLogger.DevFileCommandExecutionBegin(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow()) + + // Capture container text and log to the screen as JSON events (machine output only) + stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := machineEventLogger.CreateContainerOutputWriter() + + err := ExecuteCommand(client, compInfo, devRunExec.command, show, stdoutWriter, stderrWriter) + + // Close the writers and wait for an acknowledgement that the reader loop has exited (to ensure we get ALL container output) + closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel) + + // Emit close event + machineEventLogger.DevFileCommandExecutionComplete(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow(), err) if err != nil { return errors.Wrapf(err, "unable to execute the run command") } + } s.End(true) @@ -72,7 +100,7 @@ func ExecuteDevfileRunAction(client ExecClient, action common.DevfileCommandActi } // ExecuteDevfileRunActionWithoutRestart executes devfile run command without restarting. 
-func ExecuteDevfileRunActionWithoutRestart(client ExecClient, action common.DevfileCommandAction, commandName string, compInfo adaptersCommon.ComponentInfo, show bool) error { +func ExecuteDevfileRunActionWithoutRestart(client ExecClient, exec common.Exec, commandName string, compInfo adaptersCommon.ComponentInfo, show bool, machineEventLogger machineoutput.MachineEventLoggingClient) error { var s *log.Status type devRunExecutable struct { @@ -84,10 +112,107 @@ func ExecuteDevfileRunActionWithoutRestart(client ExecClient, action common.Devf command: []string{adaptersCommon.SupervisordBinaryPath, adaptersCommon.SupervisordCtlSubCommand, "start", string(adaptersCommon.DefaultDevfileRunCommand)}, } - s = log.Spinnerf("Executing %s command %q, if not running", commandName, *action.Command) + s = log.Spinnerf("Executing %s command %q, if not running", commandName, exec.CommandLine) + defer s.End(false) + + // Emit DevFileCommandExecutionBegin JSON event (if machine output logging is enabled) + machineEventLogger.DevFileCommandExecutionBegin(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow()) + + // Capture container text and log to the screen as JSON events (machine output only) + stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := machineEventLogger.CreateContainerOutputWriter() + + err := ExecuteCommand(client, compInfo, devRunExec.command, show, stdoutWriter, stderrWriter) + + // Close the writers and wait for an acknowledgement that the reader loop has exited (to ensure we get ALL container output) + closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel) + + // Emit close event + machineEventLogger.DevFileCommandExecutionComplete(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow(), err) + if err != nil { + return errors.Wrapf(err, "unable to execute the run command") + } + + s.End(true) + + return nil +} + +// 
ExecuteDevfileDebugAction executes the devfile debug command action using the supervisord debugrun program +func ExecuteDevfileDebugAction(client ExecClient, exec common.Exec, commandName string, compInfo adaptersCommon.ComponentInfo, show bool, machineEventLogger machineoutput.MachineEventLoggingClient) error { + var s *log.Status + + // Exec the supervisord ctl stop and start for the debugRun program + type debugRunExecutable struct { + command []string + } + debugRunExecs := []debugRunExecutable{ + { + command: []string{adaptersCommon.SupervisordBinaryPath, adaptersCommon.SupervisordCtlSubCommand, "stop", "all"}, + }, + { + command: []string{adaptersCommon.SupervisordBinaryPath, adaptersCommon.SupervisordCtlSubCommand, "start", string(adaptersCommon.DefaultDevfileDebugCommand)}, + }, + } + + s = log.Spinnerf("Executing %s command %q", commandName, exec.CommandLine) + defer s.End(false) + + for _, debugRunExec := range debugRunExecs { + + // Emit DevFileCommandExecutionBegin JSON event (if machine output logging is enabled) + machineEventLogger.DevFileCommandExecutionBegin(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow()) + + // Capture container text and log to the screen as JSON events (machine output only) + stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := machineEventLogger.CreateContainerOutputWriter() + + err := ExecuteCommand(client, compInfo, debugRunExec.command, show, stdoutWriter, stderrWriter) + + // Close the writers and wait for an acknowledgement that the reader loop has exited (to ensure we get ALL container output) + closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel) + + // Emit close event + machineEventLogger.DevFileCommandExecutionComplete(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow(), err) + + if err != nil { + return errors.Wrapf(err, "unable to execute the run command") + } + } + + 
 s.End(true) + + return nil +} + +// ExecuteDevfileDebugActionWithoutRestart executes devfile debug command without restarting. +func ExecuteDevfileDebugActionWithoutRestart(client ExecClient, exec common.Exec, commandName string, compInfo adaptersCommon.ComponentInfo, show bool, machineEventLogger machineoutput.MachineEventLoggingClient) error { + var s *log.Status + + type devDebugExecutable struct { + command []string + } + // with restart false, executing only supervisord start command, if the command is already running, supervisord will not restart it. + // if the command has failed or is not running supervisord would start it. + devDebugExec := devDebugExecutable{ + command: []string{adaptersCommon.SupervisordBinaryPath, adaptersCommon.SupervisordCtlSubCommand, "start", string(adaptersCommon.DefaultDevfileDebugCommand)}, + } + + // Emit DevFileCommandExecutionBegin JSON event (if machine output logging is enabled) + machineEventLogger.DevFileCommandExecutionBegin(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow()) + + // Capture container text and log to the screen as JSON events (machine output only) + stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := machineEventLogger.CreateContainerOutputWriter() + + s = log.Spinnerf("Executing %s command %q, if not running", commandName, exec.CommandLine) defer s.End(false) - err := ExecuteCommand(client, compInfo, devRunExec.command, show) + err := ExecuteCommand(client, compInfo, devDebugExec.command, show, stdoutWriter, stderrWriter) + + // Close the writers and wait for an acknowledgement that the reader loop has exited (to ensure we get ALL container output) + closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel) + + // Emit close event + machineEventLogger.DevFileCommandExecutionComplete(exec.Id, exec.Component, exec.CommandLine, convertGroupKindToString(exec), machineoutput.TimestampNow(), err) + if err != nil { return 
errors.Wrapf(err, "unable to execute the run command") } @@ -96,3 +221,23 @@ func ExecuteDevfileRunActionWithoutRestart(client ExecClient, action common.Devf return nil } + +// closeWriterAndWaitForAck closes the PipeWriter and then waits for a channel response from the ContainerOutputWriter (indicating that the reader had closed). +// This ensures that we always get the full stderr/stdout output from the container process BEFORE we output the devfileCommandExecution event. +func closeWriterAndWaitForAck(stdoutWriter *io.PipeWriter, stdoutChannel chan interface{}, stderrWriter *io.PipeWriter, stderrChannel chan interface{}) { + if stdoutWriter != nil { + _ = stdoutWriter.Close() + <-stdoutChannel + } + if stderrWriter != nil { + _ = stderrWriter.Close() + <-stderrChannel + } +} + +func convertGroupKindToString(exec common.Exec) string { + if exec.Group == nil { + return "" + } + return string(exec.Group.Kind) +} diff --git a/pkg/exec/exec.go b/pkg/exec/exec.go index b1b7c95066c..283bc97fa8c 100644 --- a/pkg/exec/exec.go +++ b/pkg/exec/exec.go @@ -12,19 +12,49 @@ import ( "github.com/openshift/odo/pkg/log" ) +// ExecClient is a wrapper around ExecCMDInContainer which executes a command in a specific container of a pod. 
type ExecClient interface { ExecCMDInContainer(common.ComponentInfo, []string, io.Writer, io.Writer, io.Reader, bool) error } // ExecuteCommand executes the given command in the pod's container -func ExecuteCommand(client ExecClient, compInfo common.ComponentInfo, command []string, show bool) (err error) { - reader, writer := io.Pipe() +func ExecuteCommand(client ExecClient, compInfo common.ComponentInfo, command []string, show bool, consoleOutputStdout *io.PipeWriter, consoleOutputStderr *io.PipeWriter) (err error) { + stdoutReader, stdoutWriter := io.Pipe() + stderrReader, stderrWriter := io.Pipe() + var cmdOutput string - klog.V(3).Infof("Executing command %v for pod: %v in container: %v", command, compInfo.PodName, compInfo.ContainerName) + klog.V(4).Infof("Executing command %v for pod: %v in container: %v", command, compInfo.PodName, compInfo.ContainerName) + + // Read stdout and stderr, store their output in cmdOutput, and also pass output to consoleOutput Writers (if non-nil) + stdoutCompleteChannel := startReaderGoroutine(stdoutReader, show, &cmdOutput, consoleOutputStdout) + stderrCompleteChannel := startReaderGoroutine(stderrReader, show, &cmdOutput, consoleOutputStderr) + + err = client.ExecCMDInContainer(compInfo, command, stdoutWriter, stderrWriter, nil, false) + + // Block until we have received all the container output from each stream + _ = stdoutWriter.Close() + <-stdoutCompleteChannel + _ = stderrWriter.Close() + <-stderrCompleteChannel + + if err != nil { + // It is safe to read from cmdOutput here, as the goroutines are guaranteed to have terminated at this point. + log.Errorf("\nUnable to exec command %v: \n%v", command, cmdOutput) + + return err + } + + return +} + +// This goroutine will automatically pipe the output from the writer (passed into ExecCMDInContainer) to +// the loggers. +// The returned channel will contain a single nil entry once the reader has closed. 
+func startReaderGoroutine(reader io.Reader, show bool, cmdOutput *string, consoleOutput *io.PipeWriter) chan interface{} { + + result := make(chan interface{}) - // This Go routine will automatically pipe the output from ExecCMDInContainer to - // our logger. go func() { scanner := bufio.NewScanner(reader) for scanner.Scan() { @@ -33,19 +63,22 @@ func ExecuteCommand(client ExecClient, compInfo common.ComponentInfo, command [] if log.IsDebug() || show { _, err := fmt.Fprintln(os.Stdout, line) if err != nil { - log.Errorf("Unable to print to stdout: %v", err) + log.Errorf("Unable to print to stdout: %s", err.Error()) } } - cmdOutput += fmt.Sprintln(line) + *cmdOutput += fmt.Sprintln(line) + + if consoleOutput != nil { + _, err := consoleOutput.Write([]byte(line + "\n")) + if err != nil { + log.Errorf("Error occurred on writing string to consoleOutput writer: %s", err.Error()) + } + } } + result <- nil }() - err = client.ExecCMDInContainer(compInfo, command, writer, writer, nil, false) - if err != nil { - log.Errorf("\nUnable to exec command %v: \n%v", command, cmdOutput) - return err - } + return result - return } diff --git a/pkg/kclient/fake/ingress.go b/pkg/kclient/fake/ingress.go index 6de515cb489..41ddc1cf725 100644 --- a/pkg/kclient/fake/ingress.go +++ b/pkg/kclient/fake/ingress.go @@ -1,6 +1,8 @@ package fake import ( + "fmt" + applabels "github.com/openshift/odo/pkg/application/labels" componentlabels "github.com/openshift/odo/pkg/component/labels" "github.com/openshift/odo/pkg/kclient" @@ -25,7 +27,7 @@ func GetIngressListWithMultiple(componentName string) *extensionsv1.IngressList labels.URLLabel: "example-0", }, }, - Spec: *kclient.GenerateIngressSpec(kclient.IngressParameter{ServiceName: "example-0", PortNumber: intstr.FromInt(8080)}), + Spec: *kclient.GenerateIngressSpec(kclient.IngressParameter{IngressDomain: "example-0.com", ServiceName: "example-0", PortNumber: intstr.FromInt(8080)}), }, { ObjectMeta: metav1.ObjectMeta{ @@ -38,7 +40,7 @@ func 
GetIngressListWithMultiple(componentName string) *extensionsv1.IngressList labels.URLLabel: "example-1", }, }, - Spec: *kclient.GenerateIngressSpec(kclient.IngressParameter{ServiceName: "example-1", PortNumber: intstr.FromInt(9090)}), + Spec: *kclient.GenerateIngressSpec(kclient.IngressParameter{IngressDomain: "example-1.com", ServiceName: "example-1", PortNumber: intstr.FromInt(9090)}), }, }, } @@ -57,6 +59,6 @@ func GetSingleIngress(urlName, componentName string) *extensionsv1.Ingress { applabels.App: "", }, }, - Spec: *kclient.GenerateIngressSpec(kclient.IngressParameter{ServiceName: urlName, PortNumber: intstr.FromInt(8080)}), + Spec: *kclient.GenerateIngressSpec(kclient.IngressParameter{IngressDomain: fmt.Sprintf("%s.com", urlName), ServiceName: urlName, PortNumber: intstr.FromInt(8080)}), } } diff --git a/pkg/kclient/fakeclient.go b/pkg/kclient/fakeclient.go index 3e88b1d0978..726a84e7808 100644 --- a/pkg/kclient/fakeclient.go +++ b/pkg/kclient/fakeclient.go @@ -30,7 +30,8 @@ func FakeNew() (*Client, *FakeClientset) { func FakePodStatus(status corev1.PodPhase, podName string) *corev1.Pod { return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: podName, + Name: podName, + Labels: map[string]string{}, }, Status: corev1.PodStatus{ Phase: status, diff --git a/pkg/kclient/generators.go b/pkg/kclient/generators.go index ab99888eda5..0340ef181ca 100644 --- a/pkg/kclient/generators.go +++ b/pkg/kclient/generators.go @@ -2,6 +2,7 @@ package kclient import ( "github.com/openshift/odo/pkg/devfile/adapters/common" + "k8s.io/client-go/rest" // api resource types @@ -39,10 +40,10 @@ func GenerateContainer(name, image string, isPrivileged bool, command, args []st Image: image, ImagePullPolicy: corev1.PullAlways, Resources: resourceReqs, - Command: command, - Args: args, Env: envVars, Ports: ports, + Command: command, + Args: args, } if isPrivileged { @@ -249,3 +250,13 @@ func GenerateOwnerReference(deployment *appsv1.Deployment) metav1.OwnerReference return 
ownerReference } + +// GeneratePortForwardReq builds a port forward request +func (c *Client) GeneratePortForwardReq(podName string) *rest.Request { + return c.KubeClient.CoreV1().RESTClient(). + Post(). + Resource("pods"). + Namespace(c.Namespace). + Name(podName). + SubResource("portforward") +} diff --git a/pkg/kclient/pods.go b/pkg/kclient/pods.go index aacb9cc6aea..603c65673c8 100644 --- a/pkg/kclient/pods.go +++ b/pkg/kclient/pods.go @@ -2,6 +2,7 @@ package kclient import ( "bytes" + "fmt" "io" "strings" "time" @@ -147,3 +148,27 @@ func (c *Client) ExtractProjectToComponent(compInfo common.ComponentInfo, target } return nil } + +// GetPodUsingComponentName gets a pod using the component name +func (c *Client) GetPodUsingComponentName(componentName string) (*corev1.Pod, error) { + podSelector := fmt.Sprintf("component=%s", componentName) + return c.GetOnePodFromSelector(podSelector) +} + +// GetOnePodFromSelector gets a pod from the selector +func (c *Client) GetOnePodFromSelector(selector string) (*corev1.Pod, error) { + pods, err := c.KubeClient.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + return nil, errors.Wrapf(err, "unable to get Pod for the selector: %v", selector) + } + numPods := len(pods.Items) + if numPods == 0 { + return nil, fmt.Errorf("no Pod was found for the selector: %v", selector) + } else if numPods > 1 { + return nil, fmt.Errorf("multiple Pods exist for the selector: %v. 
Only one must be present", selector) + } + + return &pods.Items[0], nil +} diff --git a/pkg/kclient/pods_test.go b/pkg/kclient/pods_test.go index 6e97c0235de..203b28f5e82 100644 --- a/pkg/kclient/pods_test.go +++ b/pkg/kclient/pods_test.go @@ -2,6 +2,8 @@ package kclient import ( "fmt" + "k8s.io/apimachinery/pkg/runtime" + "reflect" "testing" corev1 "k8s.io/api/core/v1" @@ -82,3 +84,125 @@ func TestWaitAndGetPod(t *testing.T) { }) } } + +func TestGetOnePodFromSelector(t *testing.T) { + fakePod := FakePodStatus(corev1.PodRunning, "nodejs") + fakePod.Labels["component"] = "nodejs" + + type args struct { + selector string + } + tests := []struct { + name string + args args + returnedPods *corev1.PodList + want *corev1.Pod + wantErr bool + }{ + { + name: "valid number of pods", + args: args{selector: fmt.Sprintf("component=%s", "nodejs")}, + returnedPods: &corev1.PodList{ + Items: []corev1.Pod{ + *fakePod, + }, + }, + want: fakePod, + wantErr: false, + }, + { + name: "zero pods", + args: args{selector: fmt.Sprintf("component=%s", "nodejs")}, + returnedPods: &corev1.PodList{ + Items: []corev1.Pod{}, + }, + want: &corev1.Pod{}, + wantErr: true, + }, + { + name: "mutiple pods", + args: args{selector: fmt.Sprintf("component=%s", "nodejs")}, + returnedPods: &corev1.PodList{ + Items: []corev1.Pod{ + *fakePod, + *fakePod, + }, + }, + want: &corev1.Pod{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + fkclient, fkclientset := FakeNew() + + fkclientset.Kubernetes.PrependReactor("list", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + if action.(ktesting.ListAction).GetListRestrictions().Labels.String() != fmt.Sprintf("component=%s", "nodejs") { + t.Errorf("list called with different selector want:%s, got:%s", fmt.Sprintf("component=%s", "nodejs"), action.(ktesting.ListAction).GetListRestrictions().Labels.String()) + } + return true, tt.returnedPods, nil + }) + + got, err := 
fkclient.GetOnePodFromSelector(tt.args.selector) + if (err != nil) != tt.wantErr { + t.Errorf("GetOnePodFromSelector() error = %v, wantErr %v", err, tt.wantErr) + return + } else if tt.wantErr && err != nil { + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetOnePodFromSelector() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetPodUsingComponentName(t *testing.T) { + fakePod := FakePodStatus(corev1.PodRunning, "nodejs") + fakePod.Labels["component"] = "nodejs" + + type args struct { + componentName string + } + tests := []struct { + name string + args args + want *corev1.Pod + wantErr bool + }{ + { + name: "list called with same component name", + args: args{ + componentName: "nodejs", + }, + want: fakePod, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fkclient, fkclientset := FakeNew() + + fkclientset.Kubernetes.PrependReactor("list", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + if action.(ktesting.ListAction).GetListRestrictions().Labels.String() != fmt.Sprintf("component=%s", tt.args.componentName) { + t.Errorf("list called with different selector want:%s, got:%s", fmt.Sprintf("component=%s", tt.args.componentName), action.(ktesting.ListAction).GetListRestrictions().Labels.String()) + } + return true, &corev1.PodList{ + Items: []corev1.Pod{ + *fakePod, + }, + }, nil + }) + + got, err := fkclient.GetPodUsingComponentName(tt.args.componentName) + if (err != nil) != tt.wantErr { + t.Errorf("GetPodUsingComponentName() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetPodUsingComponentName() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/kclient/volumes.go b/pkg/kclient/volumes.go index 49897ffd559..13f168ddea4 100644 --- a/pkg/kclient/volumes.go +++ b/pkg/kclient/volumes.go @@ -88,8 +88,8 @@ func AddPVCAndVolumeMount(podTemplateSpec *corev1.PodTemplateSpec, 
volumeNameToP componentAliasToMountPaths := make(map[string][]string) for containerName, volumes := range componentAliasToVolumes { for _, volume := range volumes { - if volName == *volume.Name { - componentAliasToMountPaths[containerName] = append(componentAliasToMountPaths[containerName], *volume.ContainerPath) + if volName == volume.Name { + componentAliasToMountPaths[containerName] = append(componentAliasToMountPaths[containerName], volume.ContainerPath) } } } diff --git a/pkg/kclient/volumes_test.go b/pkg/kclient/volumes_test.go index 9d8d0c28ea1..dad3ee5cd20 100644 --- a/pkg/kclient/volumes_test.go +++ b/pkg/kclient/volumes_test.go @@ -380,26 +380,26 @@ func TestAddPVCAndVolumeMount(t *testing.T) { componentAliasToVolumes: map[string][]common.DevfileVolume{ "container1": []common.DevfileVolume{ { - Name: &volNames[0], - ContainerPath: &volContainerPath[0], + Name: volNames[0], + ContainerPath: volContainerPath[0], }, { - Name: &volNames[0], - ContainerPath: &volContainerPath[1], + Name: volNames[0], + ContainerPath: volContainerPath[1], }, { - Name: &volNames[1], - ContainerPath: &volContainerPath[2], + Name: volNames[1], + ContainerPath: volContainerPath[2], }, }, "container2": []common.DevfileVolume{ { - Name: &volNames[1], - ContainerPath: &volContainerPath[1], + Name: volNames[1], + ContainerPath: volContainerPath[1], }, { - Name: &volNames[2], - ContainerPath: &volContainerPath[2], + Name: volNames[2], + ContainerPath: volContainerPath[2], }, }, }, @@ -421,12 +421,12 @@ func TestAddPVCAndVolumeMount(t *testing.T) { componentAliasToVolumes: map[string][]common.DevfileVolume{ "container2": []common.DevfileVolume{ { - Name: &volNames[1], - ContainerPath: &volContainerPath[1], + Name: volNames[1], + ContainerPath: volContainerPath[1], }, { - Name: &volNames[2], - ContainerPath: &volContainerPath[2], + Name: volNames[2], + ContainerPath: volContainerPath[2], }, }, }, @@ -474,8 +474,8 @@ func TestAddPVCAndVolumeMount(t *testing.T) { volumeMatched := 0 for _, 
volumeMount := range container.VolumeMounts { for _, testVolume := range testContainerVolumes { - testVolumeName := *testVolume.Name - testVolumePath := *testVolume.ContainerPath + testVolumeName := testVolume.Name + testVolumePath := testVolume.ContainerPath if strings.Contains(volumeMount.Name, testVolumeName) && volumeMount.MountPath == testVolumePath { volumeMatched++ } diff --git a/pkg/lclient/containers.go b/pkg/lclient/containers.go index f14a1d21885..e53e40375e8 100644 --- a/pkg/lclient/containers.go +++ b/pkg/lclient/containers.go @@ -3,6 +3,7 @@ package lclient import ( "io" "strings" + "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -53,19 +54,19 @@ func (dc *Client) GetContainerList() ([]types.Container, error) { // containerConfig - configurations for the container itself (image name, command, ports, etc) (if needed) // hostConfig - configurations related to the host (volume mounts, exposed ports, etc) (if needed) // networkingConfig - endpoints to expose (if needed) -// Returns an error if the container couldn't be started. -func (dc *Client) StartContainer(containerConfig *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) error { +// Returns containerID of the started container, an error if the container couldn't be started +func (dc *Client) StartContainer(containerConfig *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) (string, error) { resp, err := dc.Client.ContainerCreate(dc.Context, containerConfig, hostConfig, networkingConfig, "") if err != nil { - return err + return "", err } // Start the container if err := dc.Client.ContainerStart(dc.Context, resp.ID, types.ContainerStartOptions{}); err != nil { - return err + return "", err } - return nil + return resp.ID, nil } // RemoveContainer takes in a given container ID and kills it, then removes it. 
@@ -150,3 +151,22 @@ func (dc *Client) ExtractProjectToComponent(compInfo common.ComponentInfo, targe } return nil } + +// WaitForContainer waits for the container until the condition is reached +func (dc *Client) WaitForContainer(containerID string, condition container.WaitCondition) error { + + containerWaitCh, errCh := dc.Client.ContainerWait(dc.Context, containerID, condition) + for { + select { + case containerWait := <-containerWaitCh: + if containerWait.StatusCode != 0 { + return errors.Errorf("error waiting on container %s until condition %s; status code: %v, error message: %v", containerID, string(condition), containerWait.StatusCode, containerWait.Error.Message) + } + return nil + case err := <-errCh: + return errors.Wrapf(err, "unable to wait on container %s until condition %s", containerID, string(condition)) + case <-time.After(2 * time.Minute): + return errors.Errorf("timeout while waiting for container %s to reach condition %s", containerID, string(condition)) + } + } +} diff --git a/pkg/lclient/containers_test.go b/pkg/lclient/containers_test.go index e9290e666c3..c9789c2f376 100644 --- a/pkg/lclient/containers_test.go +++ b/pkg/lclient/containers_test.go @@ -8,7 +8,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" - "github.com/docker/go-connections/nat" gomock "github.com/golang/mock/gomock" "github.com/openshift/odo/pkg/devfile/adapters/common" ) @@ -183,16 +182,6 @@ func TestGetContainersList(t *testing.T) { "component": "golang", "8080": "testurl2", }, - HostConfig: container.HostConfig{ - PortBindings: nat.PortMap{ - nat.Port("8080/tcp"): []nat.PortBinding{ - nat.PortBinding{ - HostIP: "127.0.0.1", - HostPort: "54321", - }, - }, - }, - }, }, { Names: []string{"/go-test-build"}, @@ -200,16 +189,12 @@ func TestGetContainersList(t *testing.T) { Image: "golang", Labels: map[string]string{ "component": "golang", + "alias": "alias1", "8080": "testurl3", }, - HostConfig: container.HostConfig{ - 
PortBindings: nat.PortMap{ - nat.Port("8080/tcp"): []nat.PortBinding{ - nat.PortBinding{ - HostIP: "127.0.0.1", - HostPort: "65432", - }, - }, + Mounts: []types.MountPoint{ + { + Destination: OdoSourceVolumeMount, }, }, }, @@ -244,26 +229,31 @@ func TestStartContainer(t *testing.T) { fakeContainer := container.Config{} tests := []struct { - name string - client *Client - wantErr bool + name string + client *Client + wantContainerID string + wantErr bool }{ { - name: "Case 1: Successfully start container", - client: fakeClient, - wantErr: false, + name: "Case 1: Successfully start container", + client: fakeClient, + wantContainerID: "golang", + wantErr: false, }, { - name: "Case 2: Fail to start", - client: fakeErrorClient, - wantErr: true, + name: "Case 2: Fail to start", + client: fakeErrorClient, + wantContainerID: "", + wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.client.StartContainer(&fakeContainer, nil, nil) + containerID, err := tt.client.StartContainer(&fakeContainer, nil, nil) if !tt.wantErr == (err != nil) { - t.Errorf("expected %v, wanted %v", err, tt.wantErr) + t.Errorf("TestStartContainer error: expected %v, wanted %v", err, tt.wantErr) + } else if !tt.wantErr && containerID != tt.wantContainerID { + t.Errorf("TestStartContainer error: container id of start container did not match: got %v, wanted %v", containerID, tt.wantContainerID) } }) } @@ -409,3 +399,42 @@ func TestExecCMDInContainer(t *testing.T) { }) } } + +func TestWaitForContainer(t *testing.T) { + fakeClient := FakeNew() + fakeErrorClient := FakeErrorNew() + + tests := []struct { + name string + client *Client + condition container.WaitCondition + wantErr bool + }{ + { + name: "Case 1: Successfully wait for a condition", + client: fakeClient, + condition: container.WaitConditionNotRunning, + wantErr: false, + }, + { + name: "Case 2: Failed to wait for a condition with error channel", + client: fakeErrorClient, + condition: 
container.WaitConditionNotRunning, + wantErr: true, + }, + { + name: "Case 3: Failed to wait for a condition with bad exit code", + client: fakeErrorClient, + condition: container.WaitConditionNextExit, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.client.WaitForContainer("id", tt.condition) + if !tt.wantErr == (err != nil) { + t.Errorf("got: %v, wanted: %v", err, tt.wantErr) + } + }) + } +} diff --git a/pkg/lclient/fakeclient.go b/pkg/lclient/fakeclient.go index cfa151cbf5b..efe3617c906 100644 --- a/pkg/lclient/fakeclient.go +++ b/pkg/lclient/fakeclient.go @@ -39,6 +39,82 @@ var mockImageSummary = []types.ImageSummary{ }, } +var mockContainerJSONList = []types.ContainerJSON{ + types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + Name: "/node", + Image: "node", + ID: "1", + }, + Mounts: []types.MountPoint{ + { + Destination: OdoSourceVolumeMount, + }, + }, + Config: &container.Config{ + Image: "node", + Labels: map[string]string{ + "component": "test", + "alias": "alias1", + }, + }, + }, + types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + Name: "/go-test", + Image: "golang", + ID: "2", + HostConfig: &container.HostConfig{ + PortBindings: nat.PortMap{ + nat.Port("8080/tcp"): []nat.PortBinding{ + nat.PortBinding{ + HostIP: "127.0.0.1", + HostPort: "54321", + }, + }, + }, + }, + }, + Config: &container.Config{ + Image: "golang", + Labels: map[string]string{ + "component": "golang", + "8080": "testurl2", + }, + }, + }, + types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + Name: "/go-test-build", + Image: "golang", + ID: "3", + HostConfig: &container.HostConfig{ + PortBindings: nat.PortMap{ + nat.Port("8080/tcp"): []nat.PortBinding{ + nat.PortBinding{ + HostIP: "127.0.0.1", + HostPort: "65432", + }, + }, + }, + }, + }, + Mounts: []types.MountPoint{ + { + Destination: OdoSourceVolumeMount, + }, + }, + Config: &container.Config{ + Image: "golang", + Labels: 
map[string]string{ + "component": "test", + "alias": "alias1", + "8080": "testurl3", + }, + }, + }, +} + var mockContainerList = []types.Container{ types.Container{ Names: []string{"/node"}, @@ -62,16 +138,6 @@ var mockContainerList = []types.Container{ "component": "golang", "8080": "testurl2", }, - HostConfig: container.HostConfig{ - PortBindings: nat.PortMap{ - nat.Port("8080/tcp"): []nat.PortBinding{ - nat.PortBinding{ - HostIP: "127.0.0.1", - HostPort: "54321", - }, - }, - }, - }, }, types.Container{ Names: []string{"/go-test-build"}, @@ -80,15 +146,11 @@ var mockContainerList = []types.Container{ Labels: map[string]string{ "component": "golang", "8080": "testurl3", + "alias": "alias1", }, - HostConfig: container.HostConfig{ - PortBindings: nat.PortMap{ - nat.Port("8080/tcp"): []nat.PortBinding{ - nat.PortBinding{ - HostIP: "127.0.0.1", - HostPort: "65432", - }, - }, + Mounts: []types.MountPoint{ + { + Destination: OdoSourceVolumeMount, }, }, }, @@ -137,18 +199,9 @@ func (m *mockDockerClient) ContainerRemove(ctx context.Context, containerID stri } func (m *mockDockerClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - for _, containerElement := range mockContainerList { + for _, containerElement := range mockContainerJSONList { if containerElement.ID == containerID { - containerConfig := container.Config{ - Image: containerElement.Image, - Labels: containerElement.Labels, - } - return types.ContainerJSON{ - ContainerJSONBase: &types.ContainerJSONBase{ - HostConfig: &containerElement.HostConfig, - }, - Config: &containerConfig, - }, nil + return containerElement, nil } } return types.ContainerJSON{}, nil @@ -157,6 +210,15 @@ func (m *mockDockerClient) ContainerInspect(ctx context.Context, containerID str func (m *mockDockerClient) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { resultC := make(chan 
container.ContainerWaitOKBody) + go func() { + res := container.ContainerWaitOKBody{ + StatusCode: 0, + Error: &container.ContainerWaitOKBodyError{ + Message: "", + }, + } + resultC <- res + }() return resultC, nil } @@ -195,6 +257,21 @@ func (m *mockDockerClient) VolumeList(ctx context.Context, filter filters.Args) "component": "test", "type": "projects", }, + Name: "odo-project-source-test", + }, + { + Labels: map[string]string{ + "component": "duplicate", + "type": "projects", + }, + Name: "odo-project-source-duplicate1", + }, + { + Labels: map[string]string{ + "component": "duplicate", + "type": "projects", + }, + Name: "odo-project-source-duplicate2", }, }, }, nil @@ -246,7 +323,7 @@ var errContainerStart = errors.New("error starting containers") var errContainerStop = errors.New("error stopping container") var errContainerRemove = errors.New("error removing container") var errContainerInspect = errors.New("error inspecting container") -var errContainerWait = errors.New("error timeout waiting for container") +var errContainerWait = errors.New("error waiting for container") var errDistributionInspect = errors.New("error inspecting distribution") var errVolumeCreate = errors.New("error creating volume") var errVolumeList = errors.New("error listing volume") @@ -289,8 +366,23 @@ func (m *mockDockerErrorClient) ContainerInspect(ctx context.Context, containerI } func (m *mockDockerErrorClient) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) err := make(chan error) - err <- errContainerWait + go func() { + if condition == container.WaitConditionNextExit { + res := container.ContainerWaitOKBody{ + StatusCode: 1, + Error: &container.ContainerWaitOKBodyError{ + Message: "bad status code", + }, + } + resultC <- res + } + err <- errContainerWait + }() + if condition == container.WaitConditionNextExit { + return resultC, 
nil + } return nil, err } diff --git a/pkg/log/status.go b/pkg/log/status.go index b78f4efe99f..77766172715 100644 --- a/pkg/log/status.go +++ b/pkg/log/status.go @@ -303,7 +303,7 @@ func Spinnerf(format string, a ...interface{}) *Status { // SpinnerNoSpin is the same as the "Spinner" function but forces no spinning func SpinnerNoSpin(status string) *Status { - s := NewStatus(os.Stdout) + s := NewStatus(GetStdout()) s.Start(status, true) return s } diff --git a/pkg/machineoutput/event_logging.go b/pkg/machineoutput/event_logging.go new file mode 100644 index 00000000000..992c21c4db6 --- /dev/null +++ b/pkg/machineoutput/event_logging.go @@ -0,0 +1,244 @@ +package machineoutput + +import ( + "bufio" + "errors" + "fmt" + "io" + "time" + + "k8s.io/klog" +) + +// formatTime returns time in UTC Unix Epoch Seconds and then the microsecond portion of that time. +func formatTime(time time.Time) string { + result := fmt.Sprintf("%d.%06d", time.Unix(), time.Nanosecond()/1000) + return result + +} + +// TimestampNow returns timestamp in format of (seconds since UTC Unix epoch).(microseconds time component) +func TimestampNow() string { + return formatTime(time.Now()) +} + +// NewNoOpMachineEventLoggingClient creates a new instance of NoOpMachineEventLoggingClient, +// which will ignore any provided events. +func NewNoOpMachineEventLoggingClient() *NoOpMachineEventLoggingClient { + return &NoOpMachineEventLoggingClient{} +} + +var _ MachineEventLoggingClient = &NoOpMachineEventLoggingClient{} + +// DevFileCommandExecutionBegin ignores the provided event. +func (c *NoOpMachineEventLoggingClient) DevFileCommandExecutionBegin(commandID string, componentName string, commandLine string, groupKind string, timestamp string) { +} + +// DevFileCommandExecutionComplete ignores the provided event. 
+func (c *NoOpMachineEventLoggingClient) DevFileCommandExecutionComplete(commandID string, componentName string, commandLine string, groupKind string, timestamp string, errorVal error) { +} + +// CreateContainerOutputWriter ignores the provided event. +func (c *NoOpMachineEventLoggingClient) CreateContainerOutputWriter() (*io.PipeWriter, chan interface{}, *io.PipeWriter, chan interface{}) { + + channels := []chan interface{}{make(chan interface{}), make(chan interface{})} + + // Ensure there is always a result waiting on each of the channels + for _, channelPtr := range channels { + channelVal := channelPtr + + go func(channel chan interface{}) { + for { + channel <- nil + } + }(channelVal) + } + + return nil, channels[0], nil, channels[1] +} + +// ReportError ignores the provided event. +func (c *NoOpMachineEventLoggingClient) ReportError(errorVal error, timestamp string) {} + +// NewConsoleMachineEventLoggingClient creates a new instance of ConsoleMachineEventLoggingClient, +// which will output events as JSON to the console. +func NewConsoleMachineEventLoggingClient() *ConsoleMachineEventLoggingClient { + return &ConsoleMachineEventLoggingClient{} +} + +var _ MachineEventLoggingClient = &ConsoleMachineEventLoggingClient{} + +// DevFileCommandExecutionBegin outputs the provided event as JSON to the console. +func (c *ConsoleMachineEventLoggingClient) DevFileCommandExecutionBegin(commandID string, componentName string, commandLine string, groupKind string, timestamp string) { + + json := MachineEventWrapper{ + DevFileCommandExecutionBegin: &DevFileCommandExecutionBegin{ + CommandID: commandID, + ComponentName: componentName, + CommandLine: commandLine, + GroupKind: groupKind, + AbstractLogEvent: AbstractLogEvent{Timestamp: timestamp}, + }, + } + + OutputSuccessUnindented(json) +} + +// DevFileCommandExecutionComplete outputs the provided event as JSON to the console. 
+func (c *ConsoleMachineEventLoggingClient) DevFileCommandExecutionComplete(commandID string, componentName string, commandLine string, groupKind string, timestamp string, errorVal error) { + + errorStr := "" + + if errorVal != nil { + errorStr = errorVal.Error() + } + + json := MachineEventWrapper{ + DevFileCommandExecutionComplete: &DevFileCommandExecutionComplete{ + DevFileCommandExecutionBegin: DevFileCommandExecutionBegin{ + CommandID: commandID, + ComponentName: componentName, + CommandLine: commandLine, + GroupKind: groupKind, + AbstractLogEvent: AbstractLogEvent{Timestamp: timestamp}, + }, + Error: errorStr, + }, + } + + OutputSuccessUnindented(json) +} + +// CreateContainerOutputWriter returns an io.PipeWriter for which the devfile command/action process output should be +// written (for example by passing the io.PipeWriter to exec.ExecuteCommand), and a channel for communicating when the last data +// has been received on the reader. +// +// All text written to the returned object will be output as a log text event. +// Returned channels will each contain a single nil entry once the underlying reader has closed. 
+func (c *ConsoleMachineEventLoggingClient) CreateContainerOutputWriter() (*io.PipeWriter, chan interface{}, *io.PipeWriter, chan interface{}) { + + stdoutWriter, stdoutChannel := createWriterAndChannel(false) + stderrWriter, stderrChannel := createWriterAndChannel(true) + + return stdoutWriter, stdoutChannel, stderrWriter, stderrChannel + +} + +func createWriterAndChannel(stderr bool) (*io.PipeWriter, chan interface{}) { + reader, writer := io.Pipe() + + closeChannel := make(chan interface{}) + + stream := "stdout" + if stderr { + stream = "stderr" + } + + go func() { + + bufReader := bufio.NewReader(reader) + for { + line, _, err := bufReader.ReadLine() + if err != nil { + klog.V(4).Infof("Unexpected error on reading container output reader: %v", err) + break + } + + json := MachineEventWrapper{ + LogText: &LogText{ + AbstractLogEvent: AbstractLogEvent{Timestamp: TimestampNow()}, + Text: string(line), + Stream: stream, + }, + } + OutputSuccessUnindented(json) + } + + // Output a single nil event on the channel to inform that the last line of text has been + // received from the writer. + closeChannel <- nil + }() + + return writer, closeChannel +} + +// ReportError ignores the provided event. +func (c *ConsoleMachineEventLoggingClient) ReportError(errorVal error, timestamp string) { + json := MachineEventWrapper{ + ReportError: &ReportError{ + Error: errorVal.Error(), + AbstractLogEvent: AbstractLogEvent{Timestamp: timestamp}, + }, + } + + OutputSuccessUnindented(json) +} + +// GetEntry will return the JSON event parsed from a single line of '-o json' machine readable console output. +// Currently used for test purposes only. 
+func (w MachineEventWrapper) GetEntry() (MachineEventLogEntry, error) { + + if w.DevFileCommandExecutionBegin != nil { + return w.DevFileCommandExecutionBegin, nil + + } else if w.DevFileCommandExecutionComplete != nil { + return w.DevFileCommandExecutionComplete, nil + + } else if w.LogText != nil { + return w.LogText, nil + + } else if w.ReportError != nil { + return w.ReportError, nil + + } else { + return nil, errors.New("unexpected machine event log entry") + } + +} + +// GetTimestamp returns the timestamp element for this event. +func (c AbstractLogEvent) GetTimestamp() string { return c.Timestamp } + +// GetType returns the event type for this event. +func (c DevFileCommandExecutionBegin) GetType() MachineEventLogEntryType { + return TypeDevFileCommandExecutionBegin +} + +// GetType returns the event type for this event. +func (c DevFileCommandExecutionComplete) GetType() MachineEventLogEntryType { + return TypeDevFileCommandExecutionComplete +} + +// GetType returns the event type for this event. +func (c LogText) GetType() MachineEventLogEntryType { return TypeLogText } + +// GetType returns the event type for this event. +func (c ReportError) GetType() MachineEventLogEntryType { return TypeReportError } + +// MachineEventLogEntryType indicates the machine-readable event type from an ODO operation +type MachineEventLogEntryType int + +const ( + // TypeDevFileCommandExecutionBegin is the entry type for that event. + TypeDevFileCommandExecutionBegin MachineEventLogEntryType = 0 + // TypeDevFileCommandExecutionComplete is the entry type for that event. + TypeDevFileCommandExecutionComplete MachineEventLogEntryType = 1 + // TypeLogText is the entry type for that event. + TypeLogText MachineEventLogEntryType = 2 + // TypeReportError is the entry type for that event. + TypeReportError MachineEventLogEntryType = 3 +) + +// GetCommandName returns a command if the MLE supports that field (otherwise empty string is returned). 
+// Currently used for test purposes only. +func GetCommandName(entry MachineEventLogEntry) string { + + if entry.GetType() == TypeDevFileCommandExecutionBegin { + return entry.(*DevFileCommandExecutionBegin).CommandID + } else if entry.GetType() == TypeDevFileCommandExecutionComplete { + return entry.(*DevFileCommandExecutionComplete).CommandID + } else { + return "" + } + +} diff --git a/pkg/machineoutput/types.go b/pkg/machineoutput/types.go index d2603d4180f..00aa0265847 100644 --- a/pkg/machineoutput/types.go +++ b/pkg/machineoutput/types.go @@ -3,6 +3,7 @@ package machineoutput import ( "encoding/json" "fmt" + "sync" "github.com/openshift/odo/pkg/log" @@ -30,6 +31,26 @@ type GenericSuccess struct { Message string `json:"message"` } +// unindentedMutex prevents multiple JSON objects from being outputted simultaneously on the same line. This is only +// required for OutputSuccessUnindented's 'unindented' JSON objects, since objects printed by other methods are not written from +// multiple threads. +var unindentedMutex = &sync.Mutex{} + +// OutputSuccessUnindented outputs a "successful" machine-readable output format in unindented json +func OutputSuccessUnindented(machineOutput interface{}) { + printableOutput, err := json.Marshal(machineOutput) + + unindentedMutex.Lock() + defer unindentedMutex.Unlock() + + // If we error out... 
there's no way to output it (since we disable logging when using -o json) + if err != nil { + fmt.Fprintf(log.GetStderr(), "Unable to unmarshal JSON: %s\n", err.Error()) + } else { + fmt.Fprintf(log.GetStdout(), "%s\n", string(printableOutput)) + } +} + // OutputSuccess outputs a "successful" machine-readable output format in json func OutputSuccess(machineOutput interface{}) { printableOutput, err := MarshalJSONIndented(machineOutput) diff --git a/pkg/machineoutput/types_event_logging.go b/pkg/machineoutput/types_event_logging.go new file mode 100644 index 00000000000..8a79c928656 --- /dev/null +++ b/pkg/machineoutput/types_event_logging.go @@ -0,0 +1,82 @@ +package machineoutput + +import ( + "io" +) + +// MachineEventLoggingClient is an interface which is used by consuming code to output machine-readable +// event JSON to the console. Both no-op and non-no-op implementations of this interface exist. +type MachineEventLoggingClient interface { + + // These functions output the corresponding eponymous JSON event to the console + + DevFileCommandExecutionBegin(commandID string, componentName string, commandLine string, groupKind string, timestamp string) + DevFileCommandExecutionComplete(commandID string, componentName string, commandLine string, groupKind string, timestamp string, errorVal error) + ReportError(errorVal error, timestamp string) + + // CreateContainerOutputWriter is used to capture output from container processes, and synchronously write it to the screen as LogText. See implementation comments for details. + CreateContainerOutputWriter() (*io.PipeWriter, chan interface{}, *io.PipeWriter, chan interface{}) +} + +// MachineEventWrapper - a single line of machine-readable event console output must contain only one +// of these commands; the MachineEventWrapper is used to create (and parse, for tests) these lines. 
+type MachineEventWrapper struct { + DevFileCommandExecutionBegin *DevFileCommandExecutionBegin `json:"devFileCommandExecutionBegin,omitempty"` + DevFileCommandExecutionComplete *DevFileCommandExecutionComplete `json:"devFileCommandExecutionComplete,omitempty"` + LogText *LogText `json:"logText,omitempty"` + ReportError *ReportError `json:"reportError,omitempty"` +} + +// DevFileCommandExecutionBegin is the JSON event that is emitted when a dev file command begins execution. +type DevFileCommandExecutionBegin struct { + CommandID string `json:"commandId"` + ComponentName string `json:"componentName"` + CommandLine string `json:"commandLine"` + GroupKind string `json:"groupKind"` + AbstractLogEvent +} + +// DevFileCommandExecutionComplete is the JSON event that is emitted when a dev file command completes execution. +type DevFileCommandExecutionComplete struct { + DevFileCommandExecutionBegin + Error string `json:"error,omitempty"` +} + +// ReportError is the JSON event that is emitted when an error occurs during push command +type ReportError struct { + Error string `json:"error"` + AbstractLogEvent +} + +// LogText is the JSON event that is emitted when a dev file action outputs text to the console. +type LogText struct { + Text string `json:"text"` + Stream string `json:"stream"` + AbstractLogEvent +} + +// AbstractLogEvent is the base struct for all events; all events must at a minimum contain a timestamp. +type AbstractLogEvent struct { + Timestamp string `json:"timestamp"` +} + +// Ensure the various events correctly implement the desired interface. +var _ MachineEventLogEntry = &DevFileCommandExecutionBegin{} +var _ MachineEventLogEntry = &DevFileCommandExecutionComplete{} +var _ MachineEventLogEntry = &LogText{} +var _ MachineEventLogEntry = &ReportError{} + +// MachineEventLogEntry contains the expected methods for every event that is emitted. +// (This is mainly used for test purposes.) 
+type MachineEventLogEntry interface { + GetTimestamp() string + GetType() MachineEventLogEntryType +} + +// NoOpMachineEventLoggingClient will ignore (eg not output) all events passed to it +type NoOpMachineEventLoggingClient struct { +} + +// ConsoleMachineEventLoggingClient will output all events to the console as JSON +type ConsoleMachineEventLoggingClient struct { +} diff --git a/pkg/notify/notify.go b/pkg/notify/notify.go index 80df66977b3..b41e5eaa5d6 100644 --- a/pkg/notify/notify.go +++ b/pkg/notify/notify.go @@ -12,8 +12,6 @@ import ( const ( // VersionFetchURL is the URL to fetch latest version number VersionFetchURL = "https://raw.githubusercontent.com/openshift/odo/master/build/VERSION" - // InstallScriptURL is URL of the installation shell script - InstallScriptURL = "https://raw.githubusercontent.com/openshift/odo/master/scripts/installer.sh" ) // getLatestReleaseTag polls odo's upstream GitHub repository to get the diff --git a/pkg/occlient/occlient.go b/pkg/occlient/occlient.go index 9d25b663adf..02809cad5fe 100644 --- a/pkg/occlient/occlient.go +++ b/pkg/occlient/occlient.go @@ -23,6 +23,7 @@ import ( "github.com/openshift/odo/pkg/config" "github.com/openshift/odo/pkg/devfile/adapters/common" "github.com/openshift/odo/pkg/log" + "github.com/openshift/odo/pkg/odo/util/experimental" "github.com/openshift/odo/pkg/preference" "github.com/openshift/odo/pkg/util" @@ -103,6 +104,9 @@ const ( // timeout for getting the default service account getDefaultServiceAccTimeout = 1 * time.Minute + // timeout for waiting for project deletion + waitForProjectDeletionTimeOut = 3 * time.Minute + // The length of the string to be generated for names of resources nameLength = 5 @@ -750,6 +754,9 @@ func (c *Client) GetImageStream(imageNS string, imageName string, imageTag strin } if e != nil && err != nil { // Imagestream not found in openshift and current namespaces + if experimental.IsExperimentalModeEnabled() { + return nil, fmt.Errorf("component type %q not found", 
imageName) + } return nil, err } @@ -2214,67 +2221,102 @@ func (c *Client) DeleteServiceInstance(labels map[string]string) error { } // DeleteProject deletes given project -func (c *Client) DeleteProject(name string) error { - err := c.projectClient.Projects().Delete(name, &metav1.DeleteOptions{}) - if err != nil { - return errors.Wrap(err, "unable to delete project") +// +// NOTE: +// There is a very specific edge case that may happen during project deletion when deleting a project and then immediately creating another. +// Unfortunately, despite the watch interface, we cannot safely determine if the project is 100% deleted. See this link: +// https://stackoverflow.com/questions/48208001/deleted-openshift-online-pro-project-has-left-a-trace-so-cannot-create-project-o +// Will Gordon (Engineer @ Red Hat) describes the issue: +// +// "Projects are deleted asynchronously after you send the delete command. So it's possible that the deletion just hasn't been reconciled yet. It should happen within a minute or so, so try again. +// Also, please be aware that in a multitenant environment, like OpenShift Online, you are prevented from creating a project with the same name as any other project in the cluster, even if it's not your own. So if you can't create the project, it's possible that someone has already created a project with the same name." 
+func (c *Client) DeleteProject(name string, wait bool) error { + + // Instantiate watcher for our "wait" function + var watcher watch.Interface + var err error + + // If --wait has been passed, we will wait for the project to fully be deleted + if wait { + watcher, err = c.projectClient.Projects().Watch(metav1.ListOptions{ + FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), + }) + if err != nil { + return errors.Wrapf(err, "unable to watch project") + } + defer watcher.Stop() } - // wait for delete to complete - w, err := c.projectClient.Projects().Watch(metav1.ListOptions{ - FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), - }) + // Delete the project + err = c.projectClient.Projects().Delete(name, &metav1.DeleteOptions{}) if err != nil { - return errors.Wrapf(err, "unable to watch project") + return errors.Wrap(err, "unable to delete project") } - defer w.Stop() - for { - val, ok := <-w.ResultChan() - // When marked for deletion... val looks like: - /* - val: { - Type:MODIFIED - Object:&Project{ - ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{...}, - Spec:ProjectSpec{...}, - Status:ProjectStatus{ - Phase:Terminating, - }, - } - } - */ - // Post deletion val will look like: - /* - val: { - Type:DELETED - Object:&Project{ - ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{...}, - Spec:ProjectSpec{...}, - Status:ProjectStatus{ - Phase:, - }, - } - } - */ - if !ok { - return fmt.Errorf("received unexpected signal %+v on project watch channel", val) - } - // So we depend on val.Type as val.Object.Status.Phase is just empty string and not a mapped value constant - if prj, ok := val.Object.(*projectv1.Project); ok { - klog.V(4).Infof("Status of delete of project %s is %s", name, prj.Status.Phase) - switch prj.Status.Phase { - //prj.Status.Phase can only be "Terminating" or "Active" or "" - case "": - if val.Type == watch.Deleted { - return nil + // If watcher has been created (wait was passed) we will 
create a go routine and actually **wait** + // until *EVERYTHING* is successfully deleted. + if watcher != nil { + + // Project channel + // Watch error channel + projectChannel := make(chan *projectv1.Project) + watchErrorChannel := make(chan error) + + // Create a go routine to run in the background + go func() { + + for { + + // If watch unexpected has been closed.. + val, ok := <-watcher.ResultChan() + if !ok { + //return fmt.Errorf("received unexpected signal %+v on project watch channel", val) + watchErrorChannel <- errors.Errorf("watch channel was closed unexpectedly: %+v", val) + break } - if val.Type == watch.Error { - return fmt.Errorf("failed watching the deletion of project %s", name) + + // So we depend on val.Type as val.Object.Status.Phase is just empty string and not a mapped value constant + if projectStatus, ok := val.Object.(*projectv1.Project); ok { + + klog.V(4).Infof("Status of delete of project %s is '%s'", name, projectStatus.Status.Phase) + + switch projectStatus.Status.Phase { + //projectStatus.Status.Phase can only be "Terminating" or "Active" or "" + case "": + if val.Type == watch.Deleted { + projectChannel <- projectStatus + break + } + if val.Type == watch.Error { + watchErrorChannel <- errors.Errorf("failed watching the deletion of project %s", name) + break + } + } + + } else { + watchErrorChannel <- errors.New("unable to convert event object to Project") + break } + } + close(projectChannel) + close(watchErrorChannel) + }() + + select { + case val := <-projectChannel: + klog.V(4).Infof("Deletion information for project: %+v", val) + return nil + case err := <-watchErrorChannel: + return err + case <-time.After(waitForProjectDeletionTimeOut): + return errors.Errorf("waited %s but couldn't delete project %s in time", waitForProjectDeletionTimeOut, name) } + } + + // Return nil since we don't bother checking for the watcher.. 
+ return nil } // GetDeploymentConfigLabelValues get label values of given label from objects in project that are matching selector diff --git a/pkg/occlient/pods.go b/pkg/occlient/pods.go new file mode 100644 index 00000000000..b5f7a689786 --- /dev/null +++ b/pkg/occlient/pods.go @@ -0,0 +1,22 @@ +package occlient + +import ( + "fmt" + componentlabels "github.com/openshift/odo/pkg/component/labels" + "github.com/openshift/odo/pkg/util" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" +) + +func (c *Client) GetPodUsingComponentName(componentName, appName string) (*corev1.Pod, error) { + componentLabels := componentlabels.GetLabels(componentName, appName, false) + componentSelector := util.ConvertLabelsToSelector(componentLabels) + dc, err := c.GetOneDeploymentConfigFromSelector(componentSelector) + if err != nil { + return nil, errors.Wrap(err, "unable to get deployment for component") + } + // Find Pod for component + podSelector := fmt.Sprintf("deploymentconfig=%s", dc.Name) + + return c.GetOnePodFromSelector(podSelector) +} diff --git a/pkg/odo/cli/application/describe.go b/pkg/odo/cli/application/describe.go index 1441168136d..08fac8ea72f 100644 --- a/pkg/odo/cli/application/describe.go +++ b/pkg/odo/cli/application/describe.go @@ -58,7 +58,11 @@ func (o *DescribeOptions) Validate() (err error) { return fmt.Errorf("There's no active application in project: %v", o.Project) } - return nil + exist, err := application.Exists(o.appName, o.Client) + if !exist { + return fmt.Errorf("%s app does not exists", o.appName) + } + return err } // Run contains the logic for the odo command diff --git a/pkg/odo/cli/catalog/list/components.go b/pkg/odo/cli/catalog/list/components.go index 78e10fc8a02..aeda8c42ce9 100644 --- a/pkg/odo/cli/catalog/list/components.go +++ b/pkg/odo/cli/catalog/list/components.go @@ -10,11 +10,13 @@ import ( "github.com/openshift/odo/pkg/catalog" "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/machineoutput" - 
"github.com/openshift/odo/pkg/odo/cli/catalog/util" + catalogutil "github.com/openshift/odo/pkg/odo/cli/catalog/util" "github.com/openshift/odo/pkg/odo/genericclioptions" "github.com/openshift/odo/pkg/odo/util/experimental" "github.com/openshift/odo/pkg/odo/util/pushtarget" + "github.com/openshift/odo/pkg/util" "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" ) @@ -42,32 +44,39 @@ func NewListComponentsOptions() *ListComponentsOptions { // Complete completes ListComponentsOptions after they've been created func (o *ListComponentsOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { + + tasks := util.NewConcurrentTasks(2) + if !pushtarget.IsPushTargetDocker() { o.Context = genericclioptions.NewContext(cmd) - o.catalogList, err = catalog.ListComponents(o.Client) - if err != nil { - if experimental.IsExperimentalModeEnabled() { - klog.V(4).Info("Please log in to an OpenShift cluster to list OpenShift/s2i components") + + tasks.Add(util.ConcurrentTask{ToRun: func(errChannel chan error) { + o.catalogList, err = catalog.ListComponents(o.Client) + if err != nil { + if experimental.IsExperimentalModeEnabled() { + klog.V(4).Info("Please log in to an OpenShift cluster to list OpenShift/s2i components") + } else { + errChannel <- err + } } else { - return err + o.catalogList.Items = catalogutil.FilterHiddenComponents(o.catalogList.Items) } - } - - o.catalogList.Items = util.FilterHiddenComponents(o.catalogList.Items) + }}) } if experimental.IsExperimentalModeEnabled() { - o.catalogDevfileList, err = catalog.ListDevfileComponents("") - if err != nil { - return err - } - - if o.catalogDevfileList.DevfileRegistries == nil { - log.Warning("Please run 'odo registry add ' to add registry for listing devfile components\n") - } + tasks.Add(util.ConcurrentTask{ToRun: func(errChannel chan error) { + o.catalogDevfileList, err = catalog.ListDevfileComponents("") + if o.catalogDevfileList.DevfileRegistries == nil { + 
log.Warning("Please run 'odo registry add ' to add registry for listing devfile components\n") + } + if err != nil { + errChannel <- err + } + }}) } - return + return tasks.Run() } // Validate validates the ListComponentsOptions based on completed values @@ -79,6 +88,13 @@ func (o *ListComponentsOptions) Validate() (err error) { return err } +type combinedCatalogList struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + S2iItems []catalog.ComponentType `json:"s2iItems,omitempty"` + DevfileItems []catalog.DevfileComponentType `json:"devfileItems,omitempty"` +} + // Run contains the logic for the command associated with ListComponentsOptions func (o *ListComponentsOptions) Run() (err error) { if log.IsJSON() { @@ -87,7 +103,19 @@ func (o *ListComponentsOptions) Run() (err error) { supported, _ := catalog.SliceSupportedTags(image) o.catalogList.Items[i].Spec.SupportedTags = supported } - machineoutput.OutputSuccess(o.catalogList) + if experimental.IsExperimentalModeEnabled() { + combinedList := combinedCatalogList{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "odo.dev/v1alpha1", + }, + S2iItems: o.catalogList.Items, + DevfileItems: o.catalogDevfileList.Items, + } + machineoutput.OutputSuccess(combinedList) + } else { + machineoutput.OutputSuccess(o.catalogList) + } } else { w := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) var supCatalogList, unsupCatalogList []catalog.ComponentType diff --git a/pkg/odo/cli/component/create.go b/pkg/odo/cli/component/create.go index 068901dedf2..786d4be0649 100644 --- a/pkg/odo/cli/component/create.go +++ b/pkg/odo/cli/component/create.go @@ -57,6 +57,12 @@ type CreateOptions struct { devfileMetadata DevfileMetadata } +// Path of user's own devfile, user specifies the path via --devfile flag +type devfilePath struct { + protocol string + value string +} + // DevfileMetadata includes devfile component metadata type DevfileMetadata struct { componentType string 
@@ -65,6 +71,7 @@ type DevfileMetadata struct { devfileSupport bool devfileLink string devfileRegistry catalog.Registry + devfilePath devfilePath downloadSource string } @@ -78,15 +85,20 @@ const LocalDirectoryDefaultLocation = "./" // Constants for devfile component const devFile = "devfile.yaml" const envFile = ".odo/env/env.yaml" +const configFile = ".odo/config.yaml" -// DevfilePath is the path of devfile.yaml, the default path is "./devfile.yaml" +// DevfilePath is the devfile path that is used by odo, +// which means odo can: +// 1. Directly use the devfile in DevfilePath +// 2. Download devfile from registry to DevfilePath then use the devfile in DevfilePath +// 3. Copy user's own devfile (path is specified via --devfile flag) to DevfilePath then use the devfile in DevfilePath var DevfilePath = filepath.Join(LocalDirectoryDefaultLocation, devFile) -// EnvFilePath is the path of env.yaml for devfile component, the defult path is "./.odo/env/env.yaml" +// EnvFilePath is the path of env file for devfile component var EnvFilePath = filepath.Join(LocalDirectoryDefaultLocation, envFile) -// ConfigFilePath is the default path of config.yaml for s2i component -const ConfigFilePath = "./.odo/config.yaml" +// ConfigFilePath is the path of config.yaml for s2i component +var ConfigFilePath = filepath.Join(LocalDirectoryDefaultLocation, configFile) var createLongDesc = ktemplates.LongDesc(`Create a configuration describing a component. @@ -98,6 +110,14 @@ By default, builder images (component type) will be used from the current namesp If version is not specified by default, latest will be chosen as the version.`) var createExample = ktemplates.Examples(` # Create new Node.js component with the source in current directory. 
+ +Note: When you use odo with experimental mode enabled and create devfile component, if you want to use existing devfile the first argument will be the component name +# Create new Node.js component with existing devfile +%[1]s mynodejs (devfile exists in current working directory) +%[1]s mynodejs --devfile ./devfile.yaml (devfile exists in any other directory) +%[1]s mynodejs --devfile https://raw.githubusercontent.com/elsony/devfile-registry/master/devfiles/nodejs/devfile.yaml (devfile exists in network) + +# Create new Node.js component %[1]s nodejs # Create new Node.js component named 'frontend' with the source in './frontend' directory @@ -308,15 +328,56 @@ func (co *CreateOptions) Complete(name string, cmd *cobra.Command, args []string // Add a disclaimer that we are in *experimental mode* log.Experimental("Experimental mode is enabled, use at your own risk") - if util.CheckPathExists(ConfigFilePath) { + // Configure the context + if co.componentContext != "" { + DevfilePath = filepath.Join(co.componentContext, devFile) + EnvFilePath = filepath.Join(co.componentContext, envFile) + ConfigFilePath = filepath.Join(co.componentContext, configFile) + co.CommonPushOptions.componentContext = co.componentContext + } + + if util.CheckPathExists(ConfigFilePath) || util.CheckPathExists(EnvFilePath) { return errors.New("This directory already contains a component") } - if len(args) == 0 { + if util.CheckPathExists(DevfilePath) && co.devfileMetadata.devfilePath.value != "" && !util.PathEqual(DevfilePath, co.devfileMetadata.devfilePath.value) { + return errors.New("This directory already contains a devfile, you can't specify devfile via --devfile") + } + + // Validate user specify devfile path + if co.devfileMetadata.devfilePath.value != "" { + fileErr := util.ValidateFile(co.devfileMetadata.devfilePath.value) + urlErr := util.ValidateURL(co.devfileMetadata.devfilePath.value) + if fileErr != nil && urlErr != nil { + return errors.Errorf("The devfile path you specify is 
invalid with either file error \"%v\" or url error \"%v\"", fileErr, urlErr) + } else if fileErr == nil { + co.devfileMetadata.devfilePath.protocol = "file" + } else if urlErr == nil { + co.devfileMetadata.devfilePath.protocol = "http(s)" + } + } + + // Validate user specify registry + if co.devfileMetadata.devfileRegistry.Name != "" { + if co.devfileMetadata.devfilePath.value != "" { + return errors.New("You can't specify registry via --registry if you want to use the devfile that is specified via --devfile") + } + + registryList, err := catalog.GetDevfileRegistries(co.devfileMetadata.devfileRegistry.Name) + if err != nil { + return errors.Wrap(err, "Failed to get registry") + } + if len(registryList) == 0 { + return errors.Errorf("Registry %s doesn't exist, please specify a valid registry via --registry", co.devfileMetadata.devfileRegistry.Name) + } + } + + // Can't use the existing devfile or download devfile from registry, go to interactive mode + if len(args) == 0 && !util.CheckPathExists(DevfilePath) && co.devfileMetadata.devfilePath.value == "" { co.interactive = true } - // Default namespace setup + // Configure the default namespace var defaultComponentNamespace string // If the push target is set to Docker, we can't assume we have an active Kube context if !pushtarget.IsPushTargetDocker() { @@ -328,88 +389,96 @@ func (co *CreateOptions) Complete(name string, cmd *cobra.Command, args []string defaultComponentNamespace = client.Namespace } - // Configure the context - if len(co.componentContext) != 0 { - DevfilePath = filepath.Join(co.componentContext, devFile) - EnvFilePath = filepath.Join(co.componentContext, envFile) - co.CommonPushOptions.componentContext = co.componentContext - } - - catalogDevfileList, err := catalog.ListDevfileComponents(co.devfileMetadata.devfileRegistry.Name) - if err != nil { - return err - } - if catalogDevfileList.DevfileRegistries == nil { - log.Warning("Please run `odo registry add ` to add a registry then create a devfile 
components\n") - } - + var catalogDevfileList catalog.DevfileComponentTypeList var componentType string var componentName string var componentNamespace string if co.interactive { // Interactive mode - // Get component type, name and namespace from user's choice via interactive mode - // devfile.yaml is not present, user has to specify the component type - // Component type: We provide supported devfile component list then let you choose - if !util.CheckPathExists(DevfilePath) { - var supDevfileCatalogList []catalog.DevfileComponentType - for _, devfileComponent := range catalogDevfileList.Items { - if devfileComponent.Support { - supDevfileCatalogList = append(supDevfileCatalogList, devfileComponent) - } + // Component type: We provide supported devfile component list to let user choose + catalogDevfileList, err := catalog.ListDevfileComponents(co.devfileMetadata.devfileRegistry.Name) + if err != nil { + return err + } + if catalogDevfileList.DevfileRegistries == nil { + log.Warning("Registry is empty, please run `odo registry add ` to add a registry\n") + } + var supDevfileCatalogList []catalog.DevfileComponentType + for _, devfileComponent := range catalogDevfileList.Items { + if devfileComponent.Support { + supDevfileCatalogList = append(supDevfileCatalogList, devfileComponent) } - componentType = ui.SelectDevfileComponentType(supDevfileCatalogList) } + componentType = ui.SelectDevfileComponentType(supDevfileCatalogList) // Component name: User needs to specify the componet name, by default it is component type that user chooses componentName = ui.EnterDevfileComponentName(componentType) - // Component namespace: User needs to specify component namespace, - // by default it is the current active namespace if it can't get from --project flag or --namespace flag - if len(co.devfileMetadata.componentNamespace) == 0 { - if cmd.Flags().Changed("project") { - componentNamespace, err = cmd.Flags().GetString("project") - if err != nil { - return err - } - } else { - 
componentNamespace = ui.EnterDevfileComponentNamespace(defaultComponentNamespace) + // Component namespace: User needs to specify component namespace, by default it is the current active namespace + if cmd.Flags().Changed("project") && !pushtarget.IsPushTargetDocker() { + componentNamespace, err = cmd.Flags().GetString("project") + if err != nil { + return err } } else { - componentNamespace = co.devfileMetadata.componentNamespace + componentNamespace = ui.EnterDevfileComponentNamespace(defaultComponentNamespace) } } else { // Direct mode (User enters the full command) - // Get component type, name and namespace from user's full command - - if util.CheckPathExists(DevfilePath) { - return errors.New("This directory already contains a devfile.yaml, please delete it and run the component creation command again") - } - // Component type: Get from full command's first argument (mandatory in direct mode) - componentType = args[0] + if util.CheckPathExists(DevfilePath) || co.devfileMetadata.devfilePath.value != "" { + // Use existing devfile directly - // Component name: Get from full command's second argument (optional in direct mode), by default it is component type from first argument - if len(args) == 2 { - componentName = args[1] - } else { - componentName = args[0] - } + if len(args) > 1 { + return errors.Errorf("Accepts between 0 and 1 arg when using existing devfile, received %d", len(args)) + } - // Component namespace: Get from --project flag or --namespace flag, by default it is the current active namespace - if len(co.devfileMetadata.componentNamespace) == 0 { - if cmd.Flags().Changed("project") { - componentNamespace, err = cmd.Flags().GetString("project") + // If user can use existing devfile directly, the first arg is component name instead of component type + if len(args) == 1 { + componentName = args[0] + } else { + currentDirPath, err := os.Getwd() if err != nil { return err } + currentDirName := filepath.Base(currentDirPath) + componentName = 
currentDirName + } + + co.devfileMetadata.devfileSupport = true + } else { + // Download devfile from registry + + // Component type: Get from full command's first argument (mandatory in direct mode) + componentType = args[0] + + // Component name: Get from full command's second argument (optional in direct mode), by default it is component type from first argument + if len(args) == 2 { + componentName = args[1] } else { - componentNamespace = defaultComponentNamespace + componentName = args[0] + } + + // Get available devfile components for checking devfile compatibility + catalogDevfileList, err = catalog.ListDevfileComponents(co.devfileMetadata.devfileRegistry.Name) + if err != nil { + return err + } + if catalogDevfileList.DevfileRegistries == nil { + log.Warning("Registry is empty, please run `odo registry add ` to add a registry\n") + } + } + + // Component namespace: Get from --project flag, by default it is the current active namespace + if cmd.Flags().Changed("project") && !pushtarget.IsPushTargetDocker() { + componentNamespace, err = cmd.Flags().GetString("project") + if err != nil { + return err } } else { - componentNamespace = co.devfileMetadata.componentNamespace + componentNamespace = defaultComponentNamespace } } @@ -418,50 +487,85 @@ func (co *CreateOptions) Complete(name string, cmd *cobra.Command, args []string co.devfileMetadata.componentName = strings.ToLower(componentName) co.devfileMetadata.componentNamespace = strings.ToLower(componentNamespace) - // If devfile.yaml is present, we don't need to download the devfile.yaml later - if util.CheckPathExists(DevfilePath) { - co.devfileMetadata.devfileSupport = true + // Categorize the sections + log.Info("Validation") + + if util.CheckPathExists(DevfilePath) || co.devfileMetadata.devfilePath.value != "" { + var devfileAbsolutePath string + if util.CheckPathExists(DevfilePath) || co.devfileMetadata.devfilePath.protocol == "file" { + var devfilePath string + if util.CheckPathExists(DevfilePath) { + 
devfilePath = DevfilePath + } else { + devfilePath = co.devfileMetadata.devfilePath.value + } + devfileAbsolutePath, err = filepath.Abs(devfilePath) + if err != nil { + return err + } + } else if co.devfileMetadata.devfilePath.protocol == "http(s)" { + devfileAbsolutePath = co.devfileMetadata.devfilePath.value + } + devfileSpinner := log.Spinnerf("Creating a devfile component from devfile path: %s", devfileAbsolutePath) + defer devfileSpinner.End(true) + // Initialize envinfo err = co.InitEnvInfoFromContext() if err != nil { return err } return nil - } + } else { + // Since we need to support both devfile and s2i, so we have to check if the component type is + // supported by devfile, if it is supported we return and will download the corresponding devfile later, + // if it is not supported we still need to run all the codes related with s2i after devfile compatibility check - // Categorize the sections - log.Info("Validation") + hasComponent := false - // Since we need to support both devfile and s2i, so we have to check if the component type is - // supported by devfile, if it is supported we return and will download the corresponding devfile.yaml later, - // but if it is not supported we still need to run all codes related with s2i - spinner := log.Spinner("Checking devfile compatibility") + for _, devfileComponent := range catalogDevfileList.Items { + if co.devfileMetadata.componentType == devfileComponent.Name { + hasComponent = true + if devfileComponent.Support { + co.devfileMetadata.devfileSupport = true + co.devfileMetadata.devfileLink = devfileComponent.Link + co.devfileMetadata.devfileRegistry = devfileComponent.Registry + break + } + } + } - for _, devfileComponent := range catalogDevfileList.Items { - if co.devfileMetadata.componentType == devfileComponent.Name && devfileComponent.Support { - co.devfileMetadata.devfileSupport = true - co.devfileMetadata.devfileLink = devfileComponent.Link - co.devfileMetadata.devfileRegistry = devfileComponent.Registry 
+ existSpinner := log.Spinner("Checking devfile existence") + if hasComponent { + existSpinner.End(true) + } else { + existSpinner.End(false) } - } - registrySpinner := log.Spinnerf("Creating a devfile component from registry: %s", co.devfileMetadata.devfileRegistry.Name) + supportSpinner := log.Spinner("Checking devfile compatibility") + if co.devfileMetadata.devfileSupport { + registrySpinner := log.Spinnerf("Creating a devfile component from registry: %s", co.devfileMetadata.devfileRegistry.Name) - if co.devfileMetadata.devfileSupport { - err = co.InitEnvInfoFromContext() - if err != nil { - return err + // Initialize envinfo + err = co.InitEnvInfoFromContext() + if err != nil { + return err + } + + supportSpinner.End(true) + registrySpinner.End(true) + return nil } + supportSpinner.End(false) - spinner.End(true) - registrySpinner.End(true) - return nil + // Currently only devfile component supports --registry flag, so if user specifies --registry when creating devfile component, + // we should error out instead of running s2i componet code and throw warning message + if co.devfileMetadata.devfileRegistry.Name != "" { + return errors.Errorf("Devfile component type %s is not supported, please run `odo catalog list components` for a list of supported devfile component types", co.devfileMetadata.componentType) + } else { + log.Warningf("Devfile component type %s is not supported, please run `odo catalog list components` for a list of supported devfile component types", co.devfileMetadata.componentType) + } } - - spinner.End(false) - registrySpinner.End(false) - log.Italic("\nPlease run `odo catalog list components` for a list of supported devfile component types") } if len(args) == 0 || !cmd.HasFlags() { @@ -471,14 +575,19 @@ func (co *CreateOptions) Complete(name string, cmd *cobra.Command, args []string // this populates the LocalConfigInfo as well co.Context = genericclioptions.NewContextCreatingAppIfNeeded(cmd) if err != nil { - return errors.Wrap(err, "failed 
intiating local config") + return errors.Wrap(err, "failed initiating local config") } - // Do not execute S2I specific code on Kubernetes Cluster + // Do not execute S2I specific code on Kubernetes Cluster or Docker // return from here, if it is not an openshift cluster. - openshiftCluster, _ := co.Client.IsImageStreamSupported() + var openshiftCluster bool + if !pushtarget.IsPushTargetDocker() { + openshiftCluster, _ = co.Client.IsImageStreamSupported() + } else { + openshiftCluster = false + } if !openshiftCluster { - return errors.New("component not found") + return errors.New("component type not found") } // check to see if config file exists or not, if it does that @@ -560,7 +669,7 @@ func (co *CreateOptions) Complete(name string, cmd *cobra.Command, args []string } componentName := ui.EnterComponentName(defaultComponentName, co.Context) - appName := ui.EnterOpenshiftName(co.Context.Application, "Which application do you want the commponent to be associated with", co.Context) + appName := ui.EnterOpenshiftName(co.Context.Application, "Which application do you want the component to be associated with", co.Context) co.componentSettings.Application = &appName projectName := ui.EnterOpenshiftName(co.Context.Project, "Which project go you want the component to be created in", co.Context) @@ -668,10 +777,6 @@ func (co *CreateOptions) Validate() (err error) { spinner := log.Spinner("Validating devfile component") defer spinner.End(false) - if util.CheckPathExists(EnvFilePath) { - return errors.New("This workspace directory already contains a devfile component") - } - err = util.ValidateK8sResourceName("component name", co.devfileMetadata.componentName) if err != nil { return err @@ -750,8 +855,8 @@ func (co *CreateOptions) downloadProject(projectPassed string) error { return errors.Wrapf(err, "Could not get the current working directory.") } - if project.ClonePath != nil && *project.ClonePath != "" { - clonePath := *project.ClonePath + if project.ClonePath != "" { + 
clonePath := project.ClonePath if runtime.GOOS == "windows" { clonePath = strings.Replace(clonePath, "\\", "/", -1) } @@ -770,29 +875,32 @@ func (co *CreateOptions) downloadProject(projectPassed string) error { return err } - var zipUrl string - switch project.Source.Type { - case "git": - if strings.Contains(project.Source.Location, "github.com") { - zipUrl, err = util.GetGitHubZipURL(project.Source.Location) + var url, sparseDir string + if project.Git != nil { + if strings.Contains(project.Git.Location, "github.com") { + url, err = util.GetGitHubZipURL(project.Git.Location) if err != nil { return err } + sparseDir = project.Git.SparseCheckoutDir } else { return errors.Errorf("Project type git with non github url not supported") } - case "github": - zipUrl, err = util.GetGitHubZipURL(project.Source.Location) + } else if project.Github != nil { + url, err = util.GetGitHubZipURL(project.Github.Location) if err != nil { return err } - case "zip": - zipUrl = project.Source.Location - default: + sparseDir = project.Github.SparseCheckoutDir + } else if project.Zip != nil { + url = project.Zip.Location + sparseDir = project.Github.SparseCheckoutDir + } else { return errors.Errorf("Project type not supported") } - err = util.GetAndExtractZip(zipUrl, path) + err = checkoutProject(sparseDir, url, path) + if err != nil { return err } @@ -803,25 +911,47 @@ func (co *CreateOptions) downloadProject(projectPassed string) error { // Run has the logic to perform the required actions as part of command func (co *CreateOptions) Run() (err error) { if experimental.IsExperimentalModeEnabled() { - // Download devfile.yaml file and create env.yaml file if co.devfileMetadata.devfileSupport { + // Use existing devfile directly from --devfile flag + if co.devfileMetadata.devfilePath.value != "" { + if co.devfileMetadata.devfilePath.protocol == "http(s)" { + // User specify devfile path is http(s) URL + err = util.DownloadFile(co.devfileMetadata.devfilePath.value, DevfilePath) + if err != 
nil { + return errors.Wrapf(err, "failed to download devfile for devfile component from %s", co.devfileMetadata.devfilePath.value) + } + } else if co.devfileMetadata.devfilePath.protocol == "file" { + // User specify devfile path is file system link + info, err := os.Stat(co.devfileMetadata.devfilePath.value) + if err != nil { + return err + } + err = util.CopyFile(co.devfileMetadata.devfilePath.value, DevfilePath, info) + if err != nil { + return errors.Wrapf(err, "failed to copy devfile from %s to %s", co.devfileMetadata.devfilePath, DevfilePath) + } + } + } + if !util.CheckPathExists(DevfilePath) { + // Download devfile from registry err := util.DownloadFile(co.devfileMetadata.devfileRegistry.URL+co.devfileMetadata.devfileLink, DevfilePath) if err != nil { - return errors.Wrap(err, "Faile to download devfile.yaml for devfile component") + return errors.Wrapf(err, "failed to download devfile for devfile component from %s", co.devfileMetadata.devfileRegistry.URL+co.devfileMetadata.devfileLink) } } if util.CheckPathExists(DevfilePath) && co.devfileMetadata.downloadSource != "" { err = co.downloadProject(co.devfileMetadata.downloadSource) if err != nil { - return errors.Wrap(err, "Failed to download project for devfile component") + return errors.Wrap(err, "failed to download project for devfile component") } } - err := co.EnvSpecificInfo.SetConfiguration("create", envinfo.ComponentSettings{Name: co.devfileMetadata.componentName, Namespace: co.devfileMetadata.componentNamespace}) + // Generate env file + err = co.EnvSpecificInfo.SetConfiguration("create", envinfo.ComponentSettings{Name: co.devfileMetadata.componentName, Namespace: co.devfileMetadata.componentNamespace}) if err != nil { - return errors.Wrap(err, "Failed to create env.yaml for devfile component") + return errors.Wrap(err, "failed to create env file for devfile component") } log.Italic("\nPlease use `odo push` command to create the component with source deployed") @@ -904,6 +1034,23 @@ func 
ensureAndLogProperResourceUsage(resource, resourceMin, resourceMax, resourc } } +func checkoutProject(sparseCheckoutDir, zipURL, path string) error { + + if sparseCheckoutDir != "" { + err := util.GetAndExtractZip(zipURL, path, sparseCheckoutDir) + if err != nil { + return errors.Wrap(err, "failed to download and extract project zip folder") + } + } else { + // extract project to current working directory + err := util.GetAndExtractZip(zipURL, path, "/") + if err != nil { + return errors.Wrap(err, "failed to download and extract project zip folder") + } + } + return nil +} + // NewCmdCreate implements the create odo command func NewCmdCreate(name, fullName string) *cobra.Command { co := NewCreateOptions() @@ -935,6 +1082,7 @@ func NewCmdCreate(name, fullName string) *cobra.Command { componentCreateCmd.Flags().StringVar(&co.devfileMetadata.downloadSource, "downloadSource", "", "Download sample project from devfile.") componentCreateCmd.Flags().Lookup("downloadSource").NoOptDefVal = defaultProjectName //Default value to pass to the flag if one is not specified. 
componentCreateCmd.Flags().StringVar(&co.devfileMetadata.devfileRegistry.Name, "registry", "", "Create devfile component from specific registry") + componentCreateCmd.Flags().StringVar(&co.devfileMetadata.devfilePath.value, "devfile", "", "Path to the user specify devfile") } componentCreateCmd.SetUsageTemplate(odoutil.CmdUsageTemplate) diff --git a/pkg/odo/cli/component/delete.go b/pkg/odo/cli/component/delete.go index acc28419276..3645ad86de5 100644 --- a/pkg/odo/cli/component/delete.go +++ b/pkg/odo/cli/component/delete.go @@ -57,7 +57,7 @@ func NewDeleteOptions() *DeleteOptions { // Complete completes log args func (do *DeleteOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { - do.devfilePath = filepath.Join(do.componentContext, do.devfilePath) + do.devfilePath = filepath.Join(do.componentContext, DevfilePath) // if experimental mode is enabled and devfile is present if experimental.IsExperimentalModeEnabled() && util.CheckPathExists(do.devfilePath) { @@ -260,11 +260,6 @@ func NewCmdDelete(name, fullName string) *cobra.Command { }, } - // enable devfile flag if experimental mode is enabled - if experimental.IsExperimentalModeEnabled() { - componentDeleteCmd.Flags().StringVar(&do.devfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") - } - componentDeleteCmd.Flags().BoolVarP(&do.componentForceDeleteFlag, "force", "f", false, "Delete component without prompting") componentDeleteCmd.Flags().BoolVarP(&do.componentDeleteAllFlag, "all", "a", false, "Delete component and local config") componentDeleteCmd.Flags().BoolVarP(&do.componentDeleteWaitFlag, "wait", "w", false, "Wait for complete deletion of component and its dependent") diff --git a/pkg/odo/cli/component/devfile.go b/pkg/odo/cli/component/devfile.go index 1a7c13004b2..1aacc74f0a4 100644 --- a/pkg/odo/cli/component/devfile.go +++ b/pkg/odo/cli/component/devfile.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/openshift/odo/pkg/envinfo" + 
"github.com/openshift/odo/pkg/machineoutput" "github.com/openshift/odo/pkg/odo/genericclioptions" "github.com/openshift/odo/pkg/odo/util/pushtarget" "github.com/openshift/odo/pkg/util" @@ -25,15 +26,34 @@ use of Che devfiles in odo for performing various odo operations. The devfile support progress can be tracked by: https://github.com/openshift/odo/issues/2467 -Please note that this feature is currently under development and the "--devfile" -flag is exposed only if the experimental mode in odo is enabled. +Please note that this feature is currently under development, +the feature will be available with experimental mode enabled. The behaviour of this feature is subject to change as development for this feature progresses. */ // DevfilePush has the logic to perform the required actions for a given devfile -func (po *PushOptions) DevfilePush() (err error) { +func (po *PushOptions) DevfilePush() error { + + // Wrap the push so that we can capture the error in JSON-only mode + err := po.devfilePushInner() + + if err != nil && log.IsJSON() { + eventLoggingClient := machineoutput.NewConsoleMachineEventLoggingClient() + eventLoggingClient.ReportError(err, machineoutput.TimestampNow()) + + // Suppress the error to prevent it from being output by the generic machine-readable handler (which will produce invalid JSON for our purposes) + err = nil + + // os.Exit(1) since we are suppressing the generic machine-readable handler's exit code logic + os.Exit(1) + } + + return err +} + +func (po *PushOptions) devfilePushInner() (err error) { // Parse devfile devObj, err := devfileParser.Parse(po.DevfilePath) if err != nil { @@ -62,7 +82,7 @@ func (po *PushOptions) DevfilePush() (err error) { platformContext = nil } else { kc := kubernetes.KubernetesContext{ - Namespace: po.namespace, + Namespace: po.KClient.Namespace, } platformContext = kc } @@ -81,23 +101,25 @@ func (po *PushOptions) DevfilePush() (err error) { DevfileInitCmd: strings.ToLower(po.devfileInitCommand), 
DevfileBuildCmd: strings.ToLower(po.devfileBuildCommand), DevfileRunCmd: strings.ToLower(po.devfileRunCommand), + DevfileDebugCmd: strings.ToLower(po.devfileDebugCommand), + Debug: po.debugRun, + DebugPort: po.EnvSpecificInfo.GetDebugPort(), } warnIfURLSInvalid(po.EnvSpecificInfo.GetURL()) + // Start or update the component err = devfileHandler.Push(pushParams) if err != nil { - log.Errorf( - "Failed to start component with name %s.\nError: %v", + err = errors.Errorf("Failed to start component with name %s. Error: %v", componentName, err, ) - os.Exit(1) + } else { + log.Infof("\nPushing devfile component %s", componentName) + log.Success("Changes successfully pushed to component") } - log.Infof("\nPushing devfile component %s", componentName) - log.Success("Changes successfully pushed to component") - return } diff --git a/pkg/odo/cli/component/list.go b/pkg/odo/cli/component/list.go index 1924abef29a..f525f6fd564 100644 --- a/pkg/odo/cli/component/list.go +++ b/pkg/odo/cli/component/list.go @@ -64,6 +64,10 @@ func (lo *ListOptions) Validate() (err error) { var project, app string + if len(lo.Application) != 0 && lo.allAppsFlag { + klog.V(4).Infof("either --app and --all-apps both provided or provided --all-apps in a folder has app, use --all-apps anyway") + } + if !util.CheckKubeConfigExist() { project = lo.LocalConfigInfo.GetProject() app = lo.LocalConfigInfo.GetApplication() diff --git a/pkg/odo/cli/component/push.go b/pkg/odo/cli/component/push.go index fee67195adc..4147a6b4b88 100644 --- a/pkg/odo/cli/component/push.go +++ b/pkg/odo/cli/component/push.go @@ -6,6 +6,7 @@ import ( "github.com/openshift/odo/pkg/envinfo" "github.com/openshift/odo/pkg/odo/util/pushtarget" + ktemplates "k8s.io/kubectl/pkg/util/templates" "github.com/openshift/odo/pkg/component" "github.com/openshift/odo/pkg/log" @@ -18,18 +19,24 @@ import ( "github.com/spf13/cobra" odoutil "github.com/openshift/odo/pkg/odo/util" - - ktemplates "k8s.io/kubectl/pkg/util/templates" ) -var 
pushCmdExample = ktemplates.Examples(` # Push source code to the current component +var pushCmdExample = (` # Push source code to the current component %[1]s -# Push data to the current component from the original source. +# Push data to the current component from the original source %[1]s # Push source code in ~/mycode to component called my-component %[1]s my-component --context ~/mycode + +# Push source code with custom devfile commands using --build-command and --run-command for experimental mode +%[1]s --build-command="mybuild" --run-command="myrun" + `) + +var pushCmdExampleExperimentalOnly = (` +# Output JSON events corresponding to devfile command execution and log text +%[1]s -o json `) // PushRecommendedCommandName is the recommended push command name @@ -46,7 +53,10 @@ type PushOptions struct { devfileInitCommand string devfileBuildCommand string devfileRunCommand string - namespace string + devfileDebugCommand string + debugRun bool + + namespace string } // NewPushOptions returns new instance of PushOptions @@ -57,17 +67,26 @@ func NewPushOptions() *PushOptions { } } +// CompleteDevfilePath completes the devfile path from context +func (po *PushOptions) CompleteDevfilePath() { + if len(po.DevfilePath) > 0 { + po.DevfilePath = filepath.Join(po.componentContext, po.DevfilePath) + } else { + po.DevfilePath = filepath.Join(po.componentContext, "devfile.yaml") + } +} + // Complete completes push args func (po *PushOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { - po.DevfilePath = filepath.Join(po.componentContext, po.DevfilePath) + po.CompleteDevfilePath() // if experimental mode is enabled and devfile is present if experimental.IsExperimentalModeEnabled() && util.CheckPathExists(po.DevfilePath) { - envinfo, err := envinfo.NewEnvSpecificInfo(po.componentContext) + envInfo, err := envinfo.NewEnvSpecificInfo(po.componentContext) if err != nil { return errors.Wrap(err, "unable to retrieve configuration information") } - 
po.EnvSpecificInfo = envinfo + po.EnvSpecificInfo = envInfo po.Context = genericclioptions.NewDevfileContext(cmd) if !pushtarget.IsPushTargetDocker() { @@ -151,17 +170,30 @@ func (po *PushOptions) Run() (err error) { func NewCmdPush(name, fullName string) *cobra.Command { po := NewPushOptions() + annotations := map[string]string{"command": "component"} + + pushCmdExampleText := pushCmdExample + + if experimental.IsExperimentalModeEnabled() { + // The '-o json' option should only appear in help output when experimental mode is enabled. + annotations["machineoutput"] = "json" + + // The '-o json' example should likewise only appear in experimental only. + pushCmdExampleText += pushCmdExampleExperimentalOnly + } + var pushCmd = &cobra.Command{ Use: fmt.Sprintf("%s [component name]", name), Short: "Push source code to a component", Long: `Push source code to a component.`, - Example: fmt.Sprintf(pushCmdExample, fullName), + Example: fmt.Sprintf(ktemplates.Examples(pushCmdExampleText), fullName), Args: cobra.MaximumNArgs(1), - Annotations: map[string]string{"command": "component"}, + Annotations: annotations, Run: func(cmd *cobra.Command, args []string) { genericclioptions.GenericRun(po, cmd, args) }, } + genericclioptions.AddContextFlag(pushCmd, &po.componentContext) pushCmd.Flags().BoolVar(&po.show, "show-log", false, "If enabled, logs will be shown when built") pushCmd.Flags().StringSliceVar(&po.ignores, "ignore", []string{}, "Files or folders to be ignored via glob expressions.") @@ -171,11 +203,12 @@ func NewCmdPush(name, fullName string) *cobra.Command { // enable devfile flag if experimental mode is enabled if experimental.IsExperimentalModeEnabled() { - pushCmd.Flags().StringVar(&po.DevfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") pushCmd.Flags().StringVar(&po.namespace, "namespace", "", "Namespace to push the component to") pushCmd.Flags().StringVar(&po.devfileInitCommand, "init-command", "", "Devfile Init Command to execute") 
pushCmd.Flags().StringVar(&po.devfileBuildCommand, "build-command", "", "Devfile Build Command to execute") pushCmd.Flags().StringVar(&po.devfileRunCommand, "run-command", "", "Devfile Run Command to execute") + pushCmd.Flags().BoolVar(&po.debugRun, "debug", false, "Runs the component in debug mode") + pushCmd.Flags().StringVar(&po.devfileDebugCommand, "debug-command", "", "Devfile Debug Command to execute") } //Adding `--project` flag diff --git a/pkg/odo/cli/component/watch.go b/pkg/odo/cli/component/watch.go index 3eacffac71c..71fbbe7abb2 100644 --- a/pkg/odo/cli/component/watch.go +++ b/pkg/odo/cli/component/watch.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/openshift/odo/pkg/config" "github.com/openshift/odo/pkg/devfile/adapters" @@ -38,6 +39,9 @@ var watchExampleWithComponentName = ktemplates.Examples(` # Watch for changes i # Watch for changes in directory for component called frontend %[1]s frontend + +# Watch source code changes with custom devfile commands using --build-command and --run-command for experimental mode +%[1]s --build-command="mybuild" --run-command="myrun" `) var watchExample = ktemplates.Examples(` # Watch for changes in directory for current component @@ -62,6 +66,11 @@ type WatchOptions struct { EnvSpecificInfo *envinfo.EnvSpecificInfo + // devfile commands + devfileInitCommand string + devfileBuildCommand string + devfileRunCommand string + *genericclioptions.Context } @@ -72,7 +81,7 @@ func NewWatchOptions() *WatchOptions { // Complete completes watch args func (wo *WatchOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { - wo.devfilePath = filepath.Join(wo.componentContext, wo.devfilePath) + wo.devfilePath = filepath.Join(wo.componentContext, DevfilePath) // if experimental mode is enabled and devfile is present if experimental.IsExperimentalModeEnabled() && util.CheckPathExists(wo.devfilePath) { @@ -181,6 +190,10 @@ func (wo *WatchOptions) Validate() (err error) { cmpName 
:= wo.LocalConfigInfo.GetName() appName := wo.LocalConfigInfo.GetApplication() + if len(wo.Application) != 0 { + appName = wo.Application + } + exists, err := component.Exists(wo.Client, cmpName, appName) if err != nil { return @@ -207,6 +220,10 @@ func (wo *WatchOptions) Run() (err error) { ExtChan: make(chan bool), DevfileWatchHandler: wo.devfileHandler.Push, Show: wo.show, + DevfileInitCmd: strings.ToLower(wo.devfileInitCommand), + DevfileBuildCmd: strings.ToLower(wo.devfileBuildCommand), + DevfileRunCmd: strings.ToLower(wo.devfileRunCommand), + EnvSpecificInfo: wo.EnvSpecificInfo, }, ) if err != nil { @@ -268,7 +285,9 @@ func NewCmdWatch(name, fullName string) *cobra.Command { // enable devfile flag if experimental mode is enabled if experimental.IsExperimentalModeEnabled() { - watchCmd.Flags().StringVar(&wo.devfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") + watchCmd.Flags().StringVar(&wo.devfileInitCommand, "init-command", "", "Devfile Init Command to execute") + watchCmd.Flags().StringVar(&wo.devfileBuildCommand, "build-command", "", "Devfile Build Command to execute") + watchCmd.Flags().StringVar(&wo.devfileRunCommand, "run-command", "", "Devfile Run Command to execute") } // Adding context flag diff --git a/pkg/odo/cli/debug/info.go b/pkg/odo/cli/debug/info.go index 62023fabbd8..ec4d89f5ec2 100644 --- a/pkg/odo/cli/debug/info.go +++ b/pkg/odo/cli/debug/info.go @@ -2,12 +2,12 @@ package debug import ( "fmt" - - "github.com/openshift/odo/pkg/config" "github.com/openshift/odo/pkg/debug" "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/machineoutput" "github.com/openshift/odo/pkg/odo/genericclioptions" + "github.com/openshift/odo/pkg/odo/util/experimental" + "github.com/openshift/odo/pkg/util" "github.com/spf13/cobra" k8sgenclioptions "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/kubectl/pkg/util/templates" @@ -15,10 +15,13 @@ import ( // PortForwardOptions contains all the options for running the port-forward cli 
command. type InfoOptions struct { - Namespace string - PortForwarder *debug.DefaultPortForwarder + componentName string + applicationName string + Namespace string + PortForwarder *debug.DefaultPortForwarder *genericclioptions.Context - contextDir string + contextDir string + DevfilePath string } var ( @@ -43,12 +46,26 @@ func NewInfoOptions() *InfoOptions { // Complete completes all the required options for port-forward cmd. func (o *InfoOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { - o.Context = genericclioptions.NewContext(cmd) - cfg, err := config.NewLocalConfigInfo(o.contextDir) - o.LocalConfigInfo = cfg + if experimental.IsExperimentalModeEnabled() && util.CheckPathExists(o.DevfilePath) { + o.Context = genericclioptions.NewDevfileContext(cmd) + + // a small shortcut + env := o.Context.EnvSpecificInfo + + o.componentName = env.GetName() + o.Namespace = env.GetNamespace() + } else { + o.Context = genericclioptions.NewContext(cmd) + cfg := o.Context.LocalConfigInfo + o.LocalConfigInfo = cfg + + o.componentName = cfg.GetName() + o.applicationName = cfg.GetApplication() + o.Namespace = cfg.GetProject() + } // Using Discard streams because nothing important is logged - o.PortForwarder = debug.NewDefaultPortForwarder(cfg.GetName(), cfg.GetApplication(), o.Client, k8sgenclioptions.NewTestIOStreamsDiscard()) + o.PortForwarder = debug.NewDefaultPortForwarder(o.componentName, o.applicationName, o.Namespace, o.Client, o.KClient, k8sgenclioptions.NewTestIOStreamsDiscard()) return err } @@ -67,7 +84,7 @@ func (o InfoOptions) Run() error { log.Infof("Debug is running for the component on the local port : %v", debugFileInfo.Spec.LocalPort) } } else { - return fmt.Errorf("debug is not running for the component %v", o.LocalConfigInfo.GetName()) + return fmt.Errorf("debug is not running for the component %v", o.componentName) } return nil } @@ -87,6 +104,9 @@ func NewCmdInfo(name, fullName string) *cobra.Command { }, } 
genericclioptions.AddContextFlag(cmd, &opts.contextDir) + if experimental.IsExperimentalModeEnabled() { + cmd.Flags().StringVar(&opts.DevfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") + } return cmd } diff --git a/pkg/odo/cli/debug/portforward.go b/pkg/odo/cli/debug/portforward.go index 587a4e11517..34c0e15c8b9 100644 --- a/pkg/odo/cli/debug/portforward.go +++ b/pkg/odo/cli/debug/portforward.go @@ -6,6 +6,7 @@ import ( "github.com/openshift/odo/pkg/debug" "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/odo/genericclioptions" + "github.com/openshift/odo/pkg/odo/util/experimental" "github.com/openshift/odo/pkg/util" "net" "os" @@ -21,7 +22,10 @@ import ( // PortForwardOptions contains all the options for running the port-forward cli command. type PortForwardOptions struct { - Namespace string + componentName string + applicationName string + Namespace string + // PortPair is the combination of local and remote port in the format "local:remote" PortPair string @@ -34,6 +38,9 @@ type PortForwardOptions struct { // ReadChannel is used to receive status of port forwarding ( ready or not ready ) ReadyChannel chan struct{} *genericclioptions.Context + DevfilePath string + + isExperimental bool } var ( @@ -64,12 +71,32 @@ func NewPortForwardOptions() *PortForwardOptions { // Complete completes all the required options for port-forward cmd. 
func (o *PortForwardOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { - // this populates the LocalConfigInfo - o.Context = genericclioptions.NewContext(cmd) + var remotePort int + + o.isExperimental = experimental.IsExperimentalModeEnabled() - // a small shortcut - cfg := o.Context.LocalConfigInfo - remotePort := cfg.GetDebugPort() + if o.isExperimental && util.CheckPathExists(o.DevfilePath) { + o.Context = genericclioptions.NewDevfileContext(cmd) + + // a small shortcut + env := o.Context.EnvSpecificInfo + remotePort = env.GetDebugPort() + + o.componentName = env.GetName() + o.Namespace = env.GetNamespace() + + } else { + // this populates the LocalConfigInfo + o.Context = genericclioptions.NewContext(cmd) + + // a small shortcut + cfg := o.Context.LocalConfigInfo + remotePort = cfg.GetDebugPort() + + o.componentName = cfg.GetName() + o.applicationName = cfg.GetApplication() + o.Namespace = cfg.GetProject() + } // try to listen on the given local port and check if the port is free or not addressLook := "localhost:" + strconv.Itoa(o.localPort) @@ -97,7 +124,7 @@ func (o *PortForwardOptions) Complete(name string, cmd *cobra.Command, args []st o.PortPair = fmt.Sprintf("%d:%d", o.localPort, remotePort) // Using Discard streams because nothing important is logged - o.PortForwarder = debug.NewDefaultPortForwarder(cfg.GetName(), cfg.GetApplication(), o.Client, k8sgenclioptions.NewTestIOStreamsDiscard()) + o.PortForwarder = debug.NewDefaultPortForwarder(o.componentName, o.applicationName, o.Namespace, o.Client, o.KClient, k8sgenclioptions.NewTestIOStreamsDiscard()) o.StopChannel = make(chan struct{}, 1) o.ReadyChannel = make(chan struct{}) @@ -123,7 +150,7 @@ func (o PortForwardOptions) Run() error { syscall.SIGTERM, syscall.SIGQUIT) defer signal.Stop(signals) - defer os.RemoveAll(debug.GetDebugInfoFilePath(o.Client, o.LocalConfigInfo.GetName(), o.LocalConfigInfo.GetApplication())) + defer 
os.RemoveAll(debug.GetDebugInfoFilePath(o.componentName, o.applicationName, o.Namespace)) go func() { <-signals @@ -137,7 +164,7 @@ func (o PortForwardOptions) Run() error { return err } - return o.PortForwarder.ForwardPorts(o.PortPair, o.StopChannel, o.ReadyChannel) + return o.PortForwarder.ForwardPorts(o.PortPair, o.StopChannel, o.ReadyChannel, o.isExperimental) } // NewCmdPortForward implements the port-forward odo command @@ -154,6 +181,9 @@ func NewCmdPortForward(name, fullName string) *cobra.Command { }, } genericclioptions.AddContextFlag(cmd, &opts.contextDir) + if experimental.IsExperimentalModeEnabled() { + cmd.Flags().StringVar(&opts.DevfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") + } cmd.Flags().IntVarP(&opts.localPort, "local-port", "l", config.DefaultDebugPort, "Set the local port") return cmd diff --git a/pkg/odo/cli/project/delete.go b/pkg/odo/cli/project/delete.go index a9d25e7a77b..2a8506d1f09 100644 --- a/pkg/odo/cli/project/delete.go +++ b/pkg/odo/cli/project/delete.go @@ -35,6 +35,10 @@ type ProjectDeleteOptions struct { // generic context options common to all commands *genericclioptions.Context + + // wait is a boolean value to choose if we wait or not for + // our project to be deleted + wait bool } // NewProjectDeleteOptions creates a ProjectDeleteOptions instance @@ -62,6 +66,9 @@ func (pdo *ProjectDeleteOptions) Validate() (err error) { // Run runs the project delete command func (pdo *ProjectDeleteOptions) Run() (err error) { + // Create the "spinner" + s := &log.Status{} + // This to set the project in the file and runtime err = project.SetCurrent(pdo.Context.Client, pdo.projectName) if err != nil { @@ -77,15 +84,23 @@ func (pdo *ProjectDeleteOptions) Run() (err error) { if log.IsJSON() || (pdo.projectForceDeleteFlag || ui.Proceed(fmt.Sprintf("Are you sure you want to delete project %v", pdo.projectName))) { successMessage := fmt.Sprintf("Deleted project : %v", pdo.projectName) - err := 
project.Delete(pdo.Context.Client, pdo.projectName) + // If the --wait parameter has been passed, we add a spinner.. + if pdo.wait { + s = log.Spinner("Waiting for project to be deleted") + defer s.End(false) + } + + err := project.Delete(pdo.Context.Client, pdo.projectName, pdo.wait) if err != nil { return err } + s.End(true) if log.IsJSON() { project.MachineReadableSuccessOutput(pdo.projectName, successMessage) } else { log.Success(successMessage) + log.Warning("Warning! Projects are deleted from the cluster asynchronously. Odo does its best to delete the project. Due to multi-tenant clusters, the project may still exist on a different node.") } return nil } @@ -109,6 +124,7 @@ func NewCmdProjectDelete(name, fullName string) *cobra.Command { }, } + projectDeleteCmd.Flags().BoolVarP(&o.wait, "wait", "w", false, "Wait until the project has been completely deleted") projectDeleteCmd.Flags().BoolVarP(&o.projectForceDeleteFlag, "force", "f", false, "Delete project without prompting") return projectDeleteCmd diff --git a/pkg/odo/cli/registry/add.go b/pkg/odo/cli/registry/add.go index ca891b3c391..6d64237220f 100644 --- a/pkg/odo/cli/registry/add.go +++ b/pkg/odo/cli/registry/add.go @@ -25,7 +25,7 @@ var ( addExample = ktemplates.Examples(`# Add devfile registry %[1]s CheRegistry https://che-devfile-registry.openshift.io - %[1]s CheRegistryFromGitHub https://raw.githubusercontent.com/eclipse/che-devfile-registry/master + %[1]s RegistryFromGitHub https://github.com/elsony/devfile-registry `) ) diff --git a/pkg/odo/cli/registry/list.go b/pkg/odo/cli/registry/list.go index dac98ba7414..64bb85d4402 100644 --- a/pkg/odo/cli/registry/list.go +++ b/pkg/odo/cli/registry/list.go @@ -54,9 +54,13 @@ func (o *ListOptions) Run() (err error) { util.LogErrorAndExit(err, "") } + registryList := cfg.OdoSettings.RegistryList + if len(*registryList) == 0 { + return fmt.Errorf("No devfile registries added to the configuration. 
Refer `odo registry add -h` to add one") + } w := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) fmt.Fprintln(w, "NAME", "\t", "URL") - o.printRegistryList(w, cfg.OdoSettings.RegistryList) + o.printRegistryList(w, registryList) w.Flush() return } diff --git a/pkg/odo/cli/service/list.go b/pkg/odo/cli/service/list.go index cd1560d0498..efdef93a026 100644 --- a/pkg/odo/cli/service/list.go +++ b/pkg/odo/cli/service/list.go @@ -74,9 +74,16 @@ func (o *ServiceListOptions) Run() (err error) { return err } - w := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) + if len(list) == 0 { + return fmt.Errorf("No operator backed services found in the namesapce") + } + + if log.IsJSON() { + machineoutput.OutputSuccess(list) + return + } else { + w := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) - if len(list) > 0 { fmt.Fprintln(w, "NAME", "\t", "TYPE", "\t", "AGE") for _, item := range list { @@ -84,11 +91,9 @@ func (o *ServiceListOptions) Run() (err error) { fmt.Fprintln(w, item.GetName(), "\t", item.GetKind(), "\t", duration) } - } else { - fmt.Fprintln(w, "No operator backed services found in the namesapce") - } + w.Flush() - w.Flush() + } return err } diff --git a/pkg/odo/cli/url/create.go b/pkg/odo/cli/url/create.go index 84120b39456..45c6a8966df 100644 --- a/pkg/odo/cli/url/create.go +++ b/pkg/odo/cli/url/create.go @@ -53,8 +53,8 @@ var ( # Create a URL of ingress kind for the current component with a host (using CRC as an example) %[1]s --host apps-crc.testing --ingress - # Create a secured URL for the current component with a specific host (using CRC as an example) - %[1]s --host apps-crc.testing --secured + # Create a secure URL for the current component with a specific host (using CRC as an example) + %[1]s --host apps-crc.testing --secure `) urlCreateExampleDocker = ktemplates.Examples(` # Create a URL with a specific name by automatically detecting the port used by the component @@ -92,6 +92,8 @@ func 
NewURLCreateOptions() *URLCreateOptions { // Complete completes URLCreateOptions after they've been Created func (o *URLCreateOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { + o.DevfilePath = clicomponent.DevfilePath + if experimental.IsExperimentalModeEnabled() && util.CheckPathExists(o.DevfilePath) { o.Context = genericclioptions.NewDevfileContext(cmd) } else if o.now { @@ -321,7 +323,6 @@ func NewCmdURLCreate(name, fullName string) *cobra.Command { urlCreateCmd.Flags().BoolVar(&o.wantIngress, "ingress", false, "Creates an ingress instead of Route on OpenShift clusters") urlCreateCmd.Example = fmt.Sprintf(urlCreateExampleExperimental, fullName) } - urlCreateCmd.Flags().StringVar(&o.DevfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") } else { urlCreateCmd.Flags().BoolVarP(&o.secureURL, "secure", "", false, "creates a secure https url") urlCreateCmd.Example = fmt.Sprintf(urlCreateExample, fullName) diff --git a/pkg/odo/cli/url/delete.go b/pkg/odo/cli/url/delete.go index 569e70dd429..ee4eb934121 100644 --- a/pkg/odo/cli/url/delete.go +++ b/pkg/odo/cli/url/delete.go @@ -26,7 +26,7 @@ var ( // URLDeleteOptions encapsulates the options for the odo url delete command type URLDeleteOptions struct { - *clicomponent.CommonPushOptions + *clicomponent.PushOptions urlName string urlForceDeleteFlag bool now bool @@ -34,18 +34,21 @@ type URLDeleteOptions struct { // NewURLDeleteOptions creates a new URLDeleteOptions instance func NewURLDeleteOptions() *URLDeleteOptions { - return &URLDeleteOptions{CommonPushOptions: clicomponent.NewCommonPushOptions()} + return &URLDeleteOptions{PushOptions: clicomponent.NewPushOptions()} } // Complete completes URLDeleteOptions after they've been Deleted func (o *URLDeleteOptions) Complete(name string, cmd *cobra.Command, args []string) (err error) { + if experimental.IsExperimentalModeEnabled() { + o.Context = genericclioptions.NewDevfileContext(cmd) o.urlName = args[0] err = 
o.InitEnvInfoFromContext() if err != nil { return err } + o.CompleteDevfilePath() } else { if o.now { o.Context = genericclioptions.NewContextCreatingAppIfNeeded(cmd) @@ -114,8 +117,15 @@ func (o *URLDeleteOptions) Run() (err error) { if err != nil { return err } - log.Successf("URL %s removed from the env file", o.urlName) - log.Italic("\nTo delete the URL on the cluster, please use `odo push`") + if o.now { + err = o.DevfilePush() + if err != nil { + return err + } + } else { + log.Successf("URL %s removed from the env file", o.urlName) + log.Italic("\nTo delete the URL on the cluster, please use `odo push`") + } } else { err = o.LocalConfigInfo.DeleteURL(o.urlName) if err != nil { @@ -152,6 +162,7 @@ func NewCmdURLDelete(name, fullName string) *cobra.Command { } urlDeleteCmd.Flags().BoolVarP(&o.urlForceDeleteFlag, "force", "f", false, "Delete url without prompting") o.AddContextFlag(urlDeleteCmd) + urlDeleteCmd.Flags().StringVar(&o.DevfilePath, "devfile", "./devfile.yaml", "Path to a devfile.yaml") genericclioptions.AddNowFlag(urlDeleteCmd, &o.now) completion.RegisterCommandHandler(urlDeleteCmd, completion.URLCompletionHandler) completion.RegisterCommandFlagHandler(urlDeleteCmd, "context", completion.FileCompletionHandler) diff --git a/pkg/odo/cli/url/describe.go b/pkg/odo/cli/url/describe.go index c941e616775..7f6251ad931 100644 --- a/pkg/odo/cli/url/describe.go +++ b/pkg/odo/cli/url/describe.go @@ -7,6 +7,7 @@ import ( "text/tabwriter" "github.com/openshift/odo/pkg/envinfo" + "github.com/openshift/odo/pkg/occlient" "github.com/openshift/odo/pkg/odo/util/pushtarget" routev1 "github.com/openshift/api/route/v1" @@ -103,19 +104,40 @@ func (o *URLDescribeOptions) Run() (err error) { } } } else { - u, err := url.GetIngress(o.KClient, o.EnvSpecificInfo, o.url) + componentName := o.EnvSpecificInfo.GetName() + oclient, err := occlient.New() + if err != nil { + return err + } + oclient.Namespace = o.KClient.Namespace + routeSupported, err := oclient.IsRouteSupported() + 
if err != nil { + return err + } + u, err := url.GetIngressOrRoute(oclient, o.KClient, o.EnvSpecificInfo, o.url, componentName, routeSupported) if err != nil { return err } if log.IsJSON() { machineoutput.OutputSuccess(u) } else { - tabWriterURL := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) - fmt.Fprintln(tabWriterURL, "NAME", "\t", "URL", "\t", "PORT") + fmt.Fprintln(tabWriterURL, "NAME", "\t", "STATE", "\t", "URL", "\t", "PORT", "\t", "SECURE", "\t", "KIND") - fmt.Fprintln(tabWriterURL, u.Name, "\t", url.GetURLString(url.GetProtocol(routev1.Route{}, u, experimental.IsExperimentalModeEnabled()), "", u.Spec.Rules[0].Host, experimental.IsExperimentalModeEnabled()), "\t", u.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend.ServicePort.IntVal) + // are there changes between local and cluster states? + outOfSync := false + if u.Spec.Kind == envinfo.ROUTE { + fmt.Fprintln(tabWriterURL, u.Name, "\t", u.Status.State, "\t", url.GetURLString(u.Spec.Protocol, u.Spec.Host, "", experimental.IsExperimentalModeEnabled()), "\t", u.Spec.Port, "\t", u.Spec.Secure, "\t", u.Spec.Kind) + } else { + fmt.Fprintln(tabWriterURL, u.Name, "\t", u.Status.State, "\t", url.GetURLString(url.GetProtocol(routev1.Route{}, url.ConvertIngressURLToIngress(u, componentName)), "", u.Spec.Host, experimental.IsExperimentalModeEnabled()), "\t", u.Spec.Port, "\t", u.Spec.Secure, "\t", u.Spec.Kind) + } + if u.Status.State != url.StateTypePushed { + outOfSync = true + } tabWriterURL.Flush() + if outOfSync { + log.Info("There are local changes. 
Please run 'odo push'.") + } } } } else { diff --git a/pkg/odo/cli/url/list.go b/pkg/odo/cli/url/list.go index 855bd0e0d57..9c1f682c946 100644 --- a/pkg/odo/cli/url/list.go +++ b/pkg/odo/cli/url/list.go @@ -6,13 +6,14 @@ import ( "strconv" "text/tabwriter" + routev1 "github.com/openshift/api/route/v1" "github.com/openshift/odo/pkg/envinfo" + "github.com/openshift/odo/pkg/occlient" "github.com/openshift/odo/pkg/odo/util/pushtarget" "github.com/openshift/odo/pkg/odo/util/experimental" - routev1 "github.com/openshift/api/route/v1" "github.com/openshift/odo/pkg/config" "github.com/openshift/odo/pkg/lclient" "github.com/openshift/odo/pkg/log" @@ -84,7 +85,7 @@ func (o *URLListOptions) Run() (err error) { machineoutput.OutputSuccess(urls) } else { if len(urls.Items) == 0 { - return fmt.Errorf("no URLs found for component %v", componentName) + return fmt.Errorf("no URLs found for component %v. Refer `odo url create -h` to add one", componentName) } log.Infof("Found the following URLs for component %v", componentName) @@ -114,36 +115,40 @@ func (o *URLListOptions) Run() (err error) { } } else { componentName := o.EnvSpecificInfo.GetName() - // TODO: Need to list all local and pushed ingresses - // issue to track: https://github.com/openshift/odo/issues/2787 - urls, err := url.ListPushedIngress(o.KClient, componentName) + oclient, err := occlient.New() + if err != nil { + return err + } + oclient.Namespace = o.KClient.Namespace + routeSupported, err := oclient.IsRouteSupported() + if err != nil { + return err + } + urls, err := url.ListIngressAndRoute(oclient, o.KClient, o.EnvSpecificInfo, componentName, routeSupported) if err != nil { return err } - localUrls := o.EnvSpecificInfo.GetURL() if log.IsJSON() { machineoutput.OutputSuccess(urls) } else { if len(urls.Items) == 0 { - return fmt.Errorf("no URLs found for component %v", componentName) + return fmt.Errorf("no URLs found for component %v. 
Refer `odo url create -h` to add one", componentName) } log.Infof("Found the following URLs for component %v", componentName) tabWriterURL := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) - fmt.Fprintln(tabWriterURL, "NAME", "\t", "URL", "\t", "PORT", "\t", "SECURE") + fmt.Fprintln(tabWriterURL, "NAME", "\t", "STATE", "\t", "URL", "\t", "PORT", "\t", "SECURE", "\t", "KIND") // are there changes between local and cluster states? outOfSync := false - for _, i := range localUrls { - var present bool - for _, u := range urls.Items { - if i.Name == u.Name { - fmt.Fprintln(tabWriterURL, u.Name, "\t", url.GetURLString(url.GetProtocol(routev1.Route{}, u, experimental.IsExperimentalModeEnabled()), "", u.Spec.Rules[0].Host, experimental.IsExperimentalModeEnabled()), "\t", u.Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Backend.ServicePort.IntVal, "\t", u.Spec.TLS != nil) - present = true - } + for _, u := range urls.Items { + if u.Spec.Kind == envinfo.ROUTE { + fmt.Fprintln(tabWriterURL, u.Name, "\t", u.Status.State, "\t", url.GetURLString(u.Spec.Protocol, u.Spec.Host, "", experimental.IsExperimentalModeEnabled()), "\t", u.Spec.Port, "\t", u.Spec.Secure, "\t", u.Spec.Kind) + } else { + fmt.Fprintln(tabWriterURL, u.Name, "\t", u.Status.State, "\t", url.GetURLString(url.GetProtocol(routev1.Route{}, url.ConvertIngressURLToIngress(u, o.EnvSpecificInfo.GetName())), "", u.Spec.Host, experimental.IsExperimentalModeEnabled()), "\t", u.Spec.Port, "\t", u.Spec.Secure, "\t", u.Spec.Kind) } - if !present { - fmt.Fprintln(tabWriterURL, i.Name, "\t", "", "\t", i.Port) + if u.Status.State != url.StateTypePushed { + outOfSync = true } } tabWriterURL.Flush() diff --git a/pkg/odo/util/completion/completionhandlers.go b/pkg/odo/util/completion/completionhandlers.go index 839e6884e34..bc7788d992f 100644 --- a/pkg/odo/util/completion/completionhandlers.go +++ b/pkg/odo/util/completion/completionhandlers.go @@ -2,15 +2,12 @@ package completion import ( "fmt" - "strings" - - 
"github.com/openshift/odo/pkg/config" - appsv1 "github.com/openshift/api/apps/v1" "github.com/openshift/odo/pkg/application" "github.com/openshift/odo/pkg/catalog" "github.com/openshift/odo/pkg/component" componentlabels "github.com/openshift/odo/pkg/component/labels" + "github.com/openshift/odo/pkg/config" "github.com/openshift/odo/pkg/odo/genericclioptions" "github.com/openshift/odo/pkg/service" "github.com/openshift/odo/pkg/storage" @@ -18,6 +15,7 @@ import ( "github.com/openshift/odo/pkg/util" "github.com/posener/complete" "github.com/spf13/cobra" + "strings" ) // ServiceCompletionHandler provides service name completion for the current project and application @@ -271,20 +269,37 @@ var StorageUnMountCompletionHandler = func(cmd *cobra.Command, args parsedArgs, // CreateCompletionHandler provides component type completion in odo create command var CreateCompletionHandler = func(cmd *cobra.Command, args parsedArgs, context *genericclioptions.Context) (completions []string) { completions = make([]string, 0) - catalogList, err := catalog.ListComponents(context.Client) - if err != nil { - return completions - } - - for _, builder := range catalogList.Items { - // we found the builder name in the list which means - // that the builder name has been already selected by the user so no need to suggest more - if args.commands[builder.Name] { - return nil + comps := &completions + found := false + + tasks := util.NewConcurrentTasks(2) + tasks.Add(util.ConcurrentTask{ToRun: func(errChannel chan error) { + catalogList, _ := catalog.ListComponents(context.Client) + for _, builder := range catalogList.Items { + if args.commands[builder.Name] { + found = true + return + } + if len(builder.Spec.NonHiddenTags) > 0 { + *comps = append(*comps, builder.Name) + } } - completions = append(completions, builder.Name) - } + }}) + tasks.Add(util.ConcurrentTask{ToRun: func(errChannel chan error) { + components, _ := catalog.ListDevfileComponents("") + for _, devfile := range 
components.Items { + if args.commands[devfile.Name] { + found = true + return + } + *comps = append(*comps, devfile.Name) + } + }}) + _ = tasks.Run() + if found { + return nil + } return completions } diff --git a/pkg/preference/preference.go b/pkg/preference/preference.go index 5f6239ad1a5..838cfe7639f 100644 --- a/pkg/preference/preference.go +++ b/pkg/preference/preference.go @@ -77,7 +77,7 @@ const ( DefaultDevfileRegistryName = "DefaultDevfileRegistry" // DefaultDevfileRegistryURL is the URL of default devfile registry - DefaultDevfileRegistryURL = "https://raw.githubusercontent.com/elsony/devfile-registry/master" + DefaultDevfileRegistryURL = "https://github.com/elsony/devfile-registry" ) // TimeoutSettingDescription is human-readable description for the timeout setting diff --git a/pkg/project/project.go b/pkg/project/project.go index a12a740563e..6a0d510f2c1 100644 --- a/pkg/project/project.go +++ b/pkg/project/project.go @@ -4,7 +4,6 @@ import ( "github.com/openshift/odo/pkg/machineoutput" "github.com/pkg/errors" - "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/occlient" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,18 +44,16 @@ func Create(client *occlient.Client, projectName string, wait bool) error { } // Delete deletes the project with name projectName and returns errors if any -func Delete(client *occlient.Client, projectName string) error { - // Loading spinner - s := log.Spinnerf("Deleting project %s", projectName) - defer s.End(false) +func Delete(client *occlient.Client, projectName string, wait bool) error { + if projectName == "" { + return errors.Errorf("no project name given") + } // Delete the requested project - err := client.DeleteProject(projectName) + err := client.DeleteProject(projectName, wait) if err != nil { return errors.Wrap(err, "unable to delete project") } - - s.End(true) return nil } diff --git a/pkg/project/project_test.go b/pkg/project/project_test.go index 895dbc398b2..9116db57b6d 100644 --- 
a/pkg/project/project_test.go +++ b/pkg/project/project_test.go @@ -6,6 +6,7 @@ import ( "reflect" "testing" + projectv1 "github.com/openshift/api/project/v1" v1 "github.com/openshift/api/project/v1" "github.com/openshift/odo/pkg/occlient" @@ -108,16 +109,19 @@ func TestDelete(t *testing.T) { tests := []struct { name string wantErr bool + wait bool projectName string }{ { - name: "Test project delete for multiple projects", + name: "Case 1: Test project delete for multiple projects", wantErr: false, + wait: false, projectName: "prj2", }, { - name: "Test delete the only remaining project", + name: "Case 2: Test delete the only remaining project", wantErr: false, + wait: false, projectName: "testing", }, } @@ -162,19 +166,22 @@ func TestDelete(t *testing.T) { return true, nil, nil }) - go func() { - fkWatch.Delete(testingutil.FakeProjectStatus(corev1.NamespacePhase(""), tt.projectName)) - }() + // We pass in the fakeProject in order to avoid race conditions with multiple go routines + fakeProject := testingutil.FakeProjectStatus(corev1.NamespacePhase(""), tt.projectName) + go func(project *projectv1.Project) { + fkWatch.Delete(project) + }(fakeProject) + fakeClientSet.ProjClientset.PrependWatchReactor("projects", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { return true, fkWatch, nil }) // The function we are testing - err := Delete(client, tt.projectName) + err := Delete(client, tt.projectName, tt.wait) if err == nil && !tt.wantErr { - if len(fakeClientSet.ProjClientset.Actions()) != 2 { - t.Errorf("expected 2 ProjClientSet.Actions() in Project Delete, got: %v", len(fakeClientSet.ProjClientset.Actions())) + if len(fakeClientSet.ProjClientset.Actions()) != 1 { + t.Errorf("expected 1 ProjClientSet.Actions() in Project Delete, got: %v", len(fakeClientSet.ProjClientset.Actions())) } } diff --git a/pkg/sync/adapter.go b/pkg/sync/adapter.go index cb6d47f3557..ded0c0873ea 100644 --- a/pkg/sync/adapter.go +++ b/pkg/sync/adapter.go @@ -151,7 
+151,7 @@ func (a Adapter) pushLocal(path string, files []string, delFiles []string, isFor klog.V(4).Infof("Creating %s on the remote container if it doesn't already exist", syncFolder) cmdArr := getCmdToCreateSyncFolder(syncFolder) - err = exec.ExecuteCommand(a.Client, compInfo, cmdArr, false) + err = exec.ExecuteCommand(a.Client, compInfo, cmdArr, false, nil, nil) if err != nil { return err } @@ -160,7 +160,7 @@ func (a Adapter) pushLocal(path string, files []string, delFiles []string, isFor if len(delFiles) > 0 { cmdArr := getCmdToDeleteFiles(delFiles, syncFolder) - err = exec.ExecuteCommand(a.Client, compInfo, cmdArr, false) + err = exec.ExecuteCommand(a.Client, compInfo, cmdArr, false, nil, nil) if err != nil { return err } @@ -197,14 +197,14 @@ func getSyncFolder(projects []versionsCommon.DevfileProject) (string, error) { project := projects[0] // If the clonepath is set to a value, set it to be the sync folder // As some devfiles rely on the code being synced to the folder in the clonepath - if project.ClonePath != nil { - if strings.HasPrefix(*project.ClonePath, "/") { + if project.ClonePath != "" { + if strings.HasPrefix(project.ClonePath, "/") { return "", fmt.Errorf("the clonePath in the devfile must be a relative path") } - if strings.Contains(*project.ClonePath, "..") { + if strings.Contains(project.ClonePath, "..") { return "", fmt.Errorf("the clonePath in the devfile cannot escape the projects root. Don't use .. 
to try and do that") } - return filepath.ToSlash(filepath.Join(kclient.OdoSourceVolumeMount, *project.ClonePath)), nil + return filepath.ToSlash(filepath.Join(kclient.OdoSourceVolumeMount, project.ClonePath)), nil } return filepath.ToSlash(filepath.Join(kclient.OdoSourceVolumeMount, projects[0].Name)), nil } diff --git a/pkg/sync/adapter_test.go b/pkg/sync/adapter_test.go index 84c07fe4aa0..1c8c89ee74b 100644 --- a/pkg/sync/adapter_test.go +++ b/pkg/sync/adapter_test.go @@ -40,8 +40,7 @@ func TestGetSyncFolder(t *testing.T) { projects: []versionsCommon.DevfileProject{ { Name: projectNames[0], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, + Git: &versionsCommon.Git{ Location: projectRepos[0], }, }, @@ -54,15 +53,19 @@ func TestGetSyncFolder(t *testing.T) { projects: []versionsCommon.DevfileProject{ { Name: projectNames[0], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, + Git: &versionsCommon.Git{ Location: projectRepos[0], }, }, { Name: projectNames[1], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, + Github: &versionsCommon.Github{ + Location: projectRepos[1], + }, + }, + { + Name: projectNames[1], + Zip: &versionsCommon.Zip{ Location: projectRepos[1], }, }, @@ -74,10 +77,9 @@ func TestGetSyncFolder(t *testing.T) { name: "Case 4: Clone path set", projects: []versionsCommon.DevfileProject{ { - ClonePath: &projectClonePath, + ClonePath: projectClonePath, Name: projectNames[0], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, + Zip: &versionsCommon.Zip{ Location: projectRepos[0], }, }, @@ -89,10 +91,9 @@ func TestGetSyncFolder(t *testing.T) { name: "Case 5: Invalid clone path, set with absolute path", projects: []versionsCommon.DevfileProject{ { - ClonePath: &invalidClonePaths[0], + ClonePath: invalidClonePaths[0], Name: projectNames[0], - Source: versionsCommon.DevfileProjectSource{ - 
Type: versionsCommon.DevfileProjectTypeGit, + Github: &versionsCommon.Github{ Location: projectRepos[0], }, }, @@ -104,10 +105,9 @@ func TestGetSyncFolder(t *testing.T) { name: "Case 6: Invalid clone path, starts with ..", projects: []versionsCommon.DevfileProject{ { - ClonePath: &invalidClonePaths[1], + ClonePath: invalidClonePaths[1], Name: projectNames[0], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, + Git: &versionsCommon.Git{ Location: projectRepos[0], }, }, @@ -119,10 +119,9 @@ func TestGetSyncFolder(t *testing.T) { name: "Case 7: Invalid clone path, contains ..", projects: []versionsCommon.DevfileProject{ { - ClonePath: &invalidClonePaths[2], + ClonePath: invalidClonePaths[2], Name: projectNames[0], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, + Zip: &versionsCommon.Zip{ Location: projectRepos[0], }, }, @@ -208,7 +207,6 @@ func TestGetCmdToDeleteFiles(t *testing.T) { func TestSyncFiles(t *testing.T) { testComponentName := "test" - componentType := versionsCommon.DevfileComponentTypeDockerimage fakeClient := lclient.FakeNew() fakeErrorClient := lclient.FakeErrorNew() @@ -308,7 +306,7 @@ func TestSyncFiles(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := parser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: componentType, + Components: []versionsCommon.DevfileComponent{}, }, } @@ -337,7 +335,6 @@ func TestSyncFiles(t *testing.T) { func TestPushLocal(t *testing.T) { testComponentName := "test" - componentType := versionsCommon.DevfileComponentTypeDockerimage // create a temp dir for the file indexer directory, err := ioutil.TempDir("", "") @@ -429,7 +426,7 @@ func TestPushLocal(t *testing.T) { t.Run(tt.name, func(t *testing.T) { devObj := parser.DevfileObj{ Data: testingutil.TestDevfileData{ - ComponentType: componentType, + Components: []versionsCommon.DevfileComponent{}, }, } diff --git a/pkg/testingutil/devfile.go 
b/pkg/testingutil/devfile.go index 303d8b4c3a3..c9b7d9d14bb 100644 --- a/pkg/testingutil/devfile.go +++ b/pkg/testingutil/devfile.go @@ -1,15 +1,14 @@ package testingutil import ( + "github.com/openshift/odo/pkg/devfile/parser/data/common" versionsCommon "github.com/openshift/odo/pkg/devfile/parser/data/common" ) // TestDevfileData is a convenience data type used to mock up a devfile configuration type TestDevfileData struct { - ComponentType versionsCommon.DevfileComponentType - CommandActions []versionsCommon.DevfileCommandAction - MissingInitCommand bool - MissingBuildCommand bool + Components []versionsCommon.DevfileComponent + ExecCommands []versionsCommon.Exec } // GetComponents is a mock function to get the components from a devfile @@ -17,54 +16,34 @@ func (d TestDevfileData) GetComponents() []versionsCommon.DevfileComponent { return d.GetAliasedComponents() } +// GetEvents is a mock function to get events from devfile +func (d TestDevfileData) GetEvents() versionsCommon.DevfileEvents { + return versionsCommon.DevfileEvents{} +} + +// GetMetadata is a mock function to get metadata from devfile +func (d TestDevfileData) GetMetadata() versionsCommon.DevfileMetadata { + return versionsCommon.DevfileMetadata{} +} + +// GetParent is a mock function to get parent from devfile +func (d TestDevfileData) GetParent() versionsCommon.DevfileParent { + return versionsCommon.DevfileParent{} +} + // GetAliasedComponents is a mock function to get the components that have an alias from a devfile func (d TestDevfileData) GetAliasedComponents() []versionsCommon.DevfileComponent { - alias := [...]string{"alias1", "alias2"} - image := [...]string{"docker.io/maven:latest", "docker.io/hello-world:latest"} - memoryLimit := "128Mi" - volumeName := [...]string{"myvolume1", "myvolume2"} - volumePath := [...]string{"/my/volume/mount/path1", "/my/volume/mount/path2"} - return []versionsCommon.DevfileComponent{ - { - Alias: &alias[0], - DevfileComponentDockerimage: 
versionsCommon.DevfileComponentDockerimage{ - Image: &image[0], - Command: []string{}, - Args: []string{}, - Env: []versionsCommon.DockerimageEnv{}, - MemoryLimit: &memoryLimit, - Volumes: []versionsCommon.DockerimageVolume{ - { - Name: &volumeName[0], - ContainerPath: &volumePath[0], - }, - }, - }, - Type: d.ComponentType, - MountSources: true, - }, - { - Alias: &alias[1], - DevfileComponentDockerimage: versionsCommon.DevfileComponentDockerimage{ - Image: &image[1], - Command: []string{}, - Args: []string{}, - Env: []versionsCommon.DockerimageEnv{}, - MemoryLimit: &memoryLimit, - Volumes: []versionsCommon.DockerimageVolume{ - { - Name: &volumeName[0], - ContainerPath: &volumePath[0], - }, - { - Name: &volumeName[1], - ContainerPath: &volumePath[1], - }, - }, - }, - Type: d.ComponentType, - }, + var aliasedComponents = []common.DevfileComponent{} + + for _, comp := range d.Components { + if comp.Container != nil { + if comp.Container.Name != "" { + aliasedComponents = append(aliasedComponents, comp) + } + } } + return aliasedComponents + } // GetProjects is a mock function to get the components that have an alias from a devfile @@ -72,57 +51,75 @@ func (d TestDevfileData) GetProjects() []versionsCommon.DevfileProject { projectName := [...]string{"test-project", "anotherproject"} clonePath := [...]string{"/test-project", "/anotherproject"} sourceLocation := [...]string{"https://github.com/someproject/test-project.git", "https://github.com/another/project.git"} - return []versionsCommon.DevfileProject{ - { - ClonePath: &clonePath[0], - Name: projectName[0], - Source: versionsCommon.DevfileProjectSource{ - Type: versionsCommon.DevfileProjectTypeGit, - Location: sourceLocation[0], - }, + + project1 := versionsCommon.DevfileProject{ + ClonePath: clonePath[0], + Name: projectName[0], + Git: &versionsCommon.Git{ + Location: sourceLocation[0], }, - { - ClonePath: &clonePath[1], - Name: projectName[1], - Source: versionsCommon.DevfileProjectSource{ - Type: 
versionsCommon.DevfileProjectTypeGit, - Location: sourceLocation[1], - }, + } + + project2 := versionsCommon.DevfileProject{ + ClonePath: clonePath[1], + Name: projectName[1], + Git: &versionsCommon.Git{ + Location: sourceLocation[1], }, } + return []versionsCommon.DevfileProject{project1, project2} + } // GetCommands is a mock function to get the commands from a devfile func (d TestDevfileData) GetCommands() []versionsCommon.DevfileCommand { - commandName := [...]string{"devinit", "devbuild", "devrun", "customcommand"} - commands := []versionsCommon.DevfileCommand{ - { - Name: commandName[2], - Actions: d.CommandActions, - }, - { - Name: commandName[3], - Actions: d.CommandActions, - }, - } - if !d.MissingInitCommand { - commands = append(commands, versionsCommon.DevfileCommand{ - Name: commandName[0], - Actions: d.CommandActions, - }) - } - if !d.MissingBuildCommand { - commands = append(commands, versionsCommon.DevfileCommand{ - Name: commandName[1], - Actions: d.CommandActions, - }) + var commands []versionsCommon.DevfileCommand + + for i := range d.ExecCommands { + commands = append(commands, versionsCommon.DevfileCommand{Exec: &d.ExecCommands[i]}) } return commands + } // Validate is a mock validation that always validates without error func (d TestDevfileData) Validate() error { return nil } + +// GetFakeComponent returns fake component for testing +func GetFakeComponent(name string) versionsCommon.DevfileComponent { + image := "docker.io/maven:latest" + memoryLimit := "128Mi" + volumeName := "myvolume1" + volumePath := "/my/volume/mount/path1" + + return versionsCommon.DevfileComponent{ + Container: &versionsCommon.Container{ + Name: name, + Image: image, + Env: []versionsCommon.Env{}, + MemoryLimit: memoryLimit, + VolumeMounts: []versionsCommon.VolumeMount{{ + Name: volumeName, + Path: volumePath, + }}, + MountSources: true, + }} + +} + +func GetFakeExecRunCommands() []versionsCommon.Exec { + return []versionsCommon.Exec{ + { + CommandLine: "ls -a", + 
Component: "alias1", + Group: &versionsCommon.Group{ + Kind: versionsCommon.RunCommandGroupType, + }, + WorkingDir: "/root", + }, + } +} diff --git a/pkg/url/types.go b/pkg/url/types.go index 0b7f79f580a..c8c19a25954 100644 --- a/pkg/url/types.go +++ b/pkg/url/types.go @@ -15,13 +15,13 @@ type URL struct { // URLSpec is type URLSpec struct { - Host string `json:"host,omitempty"` - Protocol string `json:"protocol,omitempty"` - Port int `json:"port,omitempty"` - Secure bool `json:"secure"` - urlKind envinfo.URLKind - tLSSecret string - ExternalPort int `json:"externalport,omitempty"` + Host string `json:"host,omitempty"` + Protocol string `json:"protocol,omitempty"` + Port int `json:"port,omitempty"` + Secure bool `json:"secure"` + Kind envinfo.URLKind `json:"kind,omitempty"` + TLSSecret string `json:"tlssecret,omitempty"` + ExternalPort int `json:"externalport,omitempty"` } // AppList is a list of applications diff --git a/pkg/url/url.go b/pkg/url/url.go index a50c9b1a0ec..1b03cfb2a7d 100644 --- a/pkg/url/url.go +++ b/pkg/url/url.go @@ -20,7 +20,6 @@ import ( "github.com/openshift/odo/pkg/kclient" "github.com/openshift/odo/pkg/lclient" "github.com/openshift/odo/pkg/occlient" - "github.com/openshift/odo/pkg/odo/util/experimental" urlLabels "github.com/openshift/odo/pkg/url/labels" "github.com/openshift/odo/pkg/util" "github.com/pkg/errors" @@ -66,11 +65,13 @@ func Get(client *occlient.Client, localConfig *config.LocalConfigInfo, urlName s // search local URL, if it exist in local, update state with remote status if localURL.Name == urlName { if remoteExist { - localURL.Status.State = StateTypePushed + clusterURL := getMachineReadableFormat(*route) + clusterURL.Status.State = StateTypePushed + return clusterURL, nil } else { localURL.Status.State = StateTypeNotPushed + return localURL, nil } - return localURL, nil } } @@ -85,25 +86,72 @@ func Get(client *occlient.Client, localConfig *config.LocalConfigInfo, urlName s return URL{}, errors.New(fmt.Sprintf("the url %v 
does not exist", urlName)) } -// GetIngress returns ingress spec for given URL name -func GetIngress(kClient *kclient.Client, envSpecificInfo *envinfo.EnvSpecificInfo, urlName string) (iextensionsv1.Ingress, error) { - +// GetIngressOrRoute returns ingress/route spec for given URL name +func GetIngressOrRoute(client *occlient.Client, kClient *kclient.Client, envSpecificInfo *envinfo.EnvSpecificInfo, urlName string, componentName string, routeSupported bool) (URL, error) { + remoteExist := true + var ingress *iextensionsv1.Ingress + var route *routev1.Route + var getRouteErr error // Check whether remote already created the ingress - ingress, err := kClient.GetIngress(urlName) - if err == nil { - return *ingress, nil + ingress, getIngressErr := kClient.GetIngress(urlName) + if kerrors.IsNotFound(getIngressErr) && routeSupported { + // Check whether remote already created the route + route, getRouteErr = client.GetRoute(urlName) + } + if kerrors.IsNotFound(getIngressErr) && (!routeSupported || kerrors.IsNotFound(getRouteErr)) { + remoteExist = false + } else if (getIngressErr != nil && !kerrors.IsNotFound(getIngressErr)) || (getRouteErr != nil && !kerrors.IsNotFound(getRouteErr)) { + if getIngressErr != nil { + return URL{}, errors.Wrap(getIngressErr, "unable to get ingress") + } + return URL{}, errors.Wrap(getRouteErr, "unable to get route") + } + + envinfoURLs := envSpecificInfo.GetURL() + for _, url := range envinfoURLs { + // ignore Docker URLs + if url.Kind == envinfo.DOCKER { + continue + } + if !routeSupported && url.Kind == envinfo.ROUTE { + continue + } + localURL := ConvertEnvinfoURL(url, componentName) + // search local URL, if it exist in local, update state with remote status + if localURL.Name == urlName { + if remoteExist { + if ingress != nil && ingress.Spec.Rules != nil { + // Remote exist, but not in local, so it's deleted status + clusterURL := getMachineReadableFormatIngress(*ingress) + clusterURL.Status.State = StateTypePushed + return 
clusterURL, nil + } else if route != nil { + clusterURL := getMachineReadableFormat(*route) + clusterURL.Status.State = StateTypePushed + return clusterURL, nil + } + } else { + localURL.Status.State = StateTypeNotPushed + } + return localURL, nil + } } - ingresses := envSpecificInfo.GetURL() - for _, envIngress := range ingresses { - // search local URL check if it exist in local envinfo - if envIngress.Name == urlName { - return iextensionsv1.Ingress{}, errors.New(fmt.Sprintf("the url %v is not created, but exists in local envinfo file. Please run 'odo push'.", urlName)) + if remoteExist { + if ingress != nil && ingress.Spec.Rules != nil { + // Remote exist, but not in local, so it's deleted status + clusterURL := getMachineReadableFormatIngress(*ingress) + clusterURL.Status.State = StateTypeLocallyDeleted + return clusterURL, nil + } else if route != nil { + clusterURL := getMachineReadableFormat(*route) + clusterURL.Status.State = StateTypeLocallyDeleted + return clusterURL, nil } } // can't find the URL in local and remote - return iextensionsv1.Ingress{}, errors.New(fmt.Sprintf("the url %v does not exist", urlName)) + return URL{}, errors.New(fmt.Sprintf("the url %v does not exist", urlName)) } // GetContainer returns Docker URL definition for given URL name @@ -270,7 +318,7 @@ func Create(client *occlient.Client, kClient *kclient.Client, parameters CreateP if err != nil { return "", errors.Wrap(err, "unable to create ingress") } - return GetURLString(GetProtocol(routev1.Route{}, *ingress, isExperimental), "", ingressDomain, isExperimental), nil + return GetURLString(GetProtocol(routev1.Route{}, *ingress), "", ingressDomain, isExperimental), nil } else { if !isRouteSupported { return "", errors.Errorf("routes are not available on non OpenShift clusters") @@ -312,7 +360,7 @@ func Create(client *occlient.Client, kClient *kclient.Client, parameters CreateP if err != nil { return "", errors.Wrap(err, "unable to create route") } - return 
GetURLString(GetProtocol(*route, iextensionsv1.Ingress{}, isExperimental), route.Spec.Host, "", isExperimental), nil + return GetURLString(GetProtocol(*route, iextensionsv1.Ingress{}), route.Spec.Host, "", isExperimental), nil } } @@ -348,22 +396,22 @@ func ListPushed(client *occlient.Client, componentName string, applicationName s } -// ListPushedIngress lists the ingress URLs for the given component -func ListPushedIngress(client *kclient.Client, componentName string) (iextensionsv1.IngressList, error) { +// ListPushedIngress lists the ingress URLs on cluster for the given component +func ListPushedIngress(client *kclient.Client, componentName string) (URLList, error) { labelSelector := fmt.Sprintf("%v=%v", componentlabels.ComponentLabel, componentName) klog.V(4).Infof("Listing ingresses with label selector: %v", labelSelector) ingresses, err := client.ListIngresses(labelSelector) if err != nil { - return iextensionsv1.IngressList{}, errors.Wrap(err, "unable to list ingress names") + return URLList{}, errors.Wrap(err, "unable to list ingress names") } - var urls []iextensionsv1.Ingress + var urls []URL for _, i := range ingresses { a := getMachineReadableFormatIngress(i) urls = append(urls, a) } - urlList := getMachineReadableFormatForIngressList(urls) + urlList := getMachineReadableFormatForList(urls) return urlList, nil } @@ -408,7 +456,7 @@ func List(client *occlient.Client, localConfig *config.LocalConfigInfo, componen for _, configURL := range localConfigURLs { localURL := ConvertConfigURL(configURL) - var found bool = false + var found = false for _, r := range routes { clusterURL := getMachineReadableFormat(r) if localURL.Name == clusterURL.Name { @@ -426,6 +474,75 @@ func List(client *occlient.Client, localConfig *config.LocalConfigInfo, componen return urlList, nil } +// ListIngressAndRoute returns all Ingress and Route for given component. 
+func ListIngressAndRoute(oclient *occlient.Client, client *kclient.Client, envSpecificInfo *envinfo.EnvSpecificInfo, componentName string, routeSupported bool) (URLList, error) { + labelSelector := fmt.Sprintf("%v=%v", componentlabels.ComponentLabel, componentName) + klog.V(4).Infof("Listing ingresses with label selector: %v", labelSelector) + ingresses, err := client.ListIngresses(labelSelector) + if err != nil { + return URLList{}, errors.Wrap(err, "unable to list ingress") + } + routes := []routev1.Route{} + if routeSupported { + routes, err = oclient.ListRoutes(labelSelector) + if err != nil { + return URLList{}, errors.Wrap(err, "unable to list routes") + } + } + localEnvinfoURLs := envSpecificInfo.GetURL() + + var urls []URL + + clusterURLMap := make(map[string]URL) + localMap := make(map[string]URL) + for _, i := range ingresses { + clusterURL := getMachineReadableFormatIngress(i) + clusterURLMap[clusterURL.Name] = clusterURL + } + for _, r := range routes { + if r.OwnerReferences != nil && r.OwnerReferences[0].Kind == "Ingress" { + continue + } + clusterURL := getMachineReadableFormat(r) + clusterURLMap[clusterURL.Name] = clusterURL + } + for _, envinfoURL := range localEnvinfoURLs { + // only checks for Ingress and Route URLs + if envinfoURL.Kind == envinfo.DOCKER { + continue + } + if !routeSupported && envinfoURL.Kind == envinfo.ROUTE { + continue + } + localURL := ConvertEnvinfoURL(envinfoURL, componentName) + localMap[localURL.Name] = localURL + } + + for URLName, clusterURL := range clusterURLMap { + _, found := localMap[URLName] + if found { + // URL is in both local env file and cluster + clusterURL.Status.State = StateTypePushed + urls = append(urls, clusterURL) + } else { + // URL is on the cluster but not in local env file + clusterURL.Status.State = StateTypeLocallyDeleted + urls = append(urls, clusterURL) + } + } + + for localName, localURL := range localMap { + _, remoteURLFound := clusterURLMap[localName] + if !remoteURLFound { + // URL is 
in the local env file but not pushed to cluster + localURL.Status.State = StateTypeNotPushed + urls = append(urls, localURL) + } + } + urlList := getMachineReadableFormatForList(urls) + return urlList, nil +} + // ListDockerURL returns all Docker URLs for given component. func ListDockerURL(client *lclient.Client, componentName string, envSpecificInfo *envinfo.EnvSpecificInfo) (URLList, error) { containers, err := dockerutils.GetComponentContainers(*client, componentName) @@ -506,21 +623,16 @@ func ListDockerURL(client *lclient.Client, componentName string, envSpecificInfo urls = append(urls, localURL) } } - urlList := getMachineReadableFormatForList(urls) return urlList, nil } // GetProtocol returns the protocol string -func GetProtocol(route routev1.Route, ingress iextensionsv1.Ingress, isExperimental bool) string { - if isExperimental { - if ingress.Spec.TLS != nil { - return "https" - } - } else { - if route.Spec.TLS != nil { - return "https" - } +func GetProtocol(route routev1.Route, ingress iextensionsv1.Ingress) string { + if !reflect.DeepEqual(ingress, iextensionsv1.Ingress{}) && ingress.Spec.TLS != nil { + return "https" + } else if !reflect.DeepEqual(route, routev1.Route{}) && route.Spec.TLS != nil { + return "https" } return "http" } @@ -536,11 +648,41 @@ func ConvertConfigURL(configURL config.ConfigURL) URL { Name: configURL.Name, }, Spec: URLSpec{ - Port: configURL.Port, + Port: configURL.Port, + Secure: configURL.Secure, + Kind: envinfo.ROUTE, }, } } +// ConvertEnvinfoURL converts EnvinfoURL to URL +func ConvertEnvinfoURL(envinfoURL envinfo.EnvInfoURL, serviceName string) URL { + hostString := fmt.Sprintf("%s.%s", envinfoURL.Name, envinfoURL.Host) + url := URL{ + TypeMeta: metav1.TypeMeta{ + Kind: "url", + APIVersion: apiVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: envinfoURL.Name, + }, + Spec: URLSpec{ + Port: envinfoURL.Port, + Secure: envinfoURL.Secure, + Kind: envinfoURL.Kind, + }, + } + if envinfoURL.Kind == envinfo.INGRESS { + 
url.Spec.Host = hostString + if envinfoURL.Secure && len(envinfoURL.TLSSecret) > 0 { + url.Spec.TLSSecret = envinfoURL.TLSSecret + } else if envinfoURL.Secure && envinfoURL.Kind == envinfo.INGRESS { + url.Spec.TLSSecret = fmt.Sprintf("%s-tlssecret", serviceName) + } + } + return url +} + // GetURLString returns a string representation of given url func GetURLString(protocol, URL string, ingressDomain string, isExperimentalMode bool) string { if isExperimentalMode && URL == "" { @@ -634,7 +776,7 @@ func getMachineReadableFormat(r routev1.Route) URL { return URL{ TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: apiVersion}, ObjectMeta: metav1.ObjectMeta{Name: r.Labels[urlLabels.URLLabel]}, - Spec: URLSpec{Host: r.Spec.Host, Port: r.Spec.Port.TargetPort.IntValue(), Protocol: GetProtocol(r, iextensionsv1.Ingress{}, experimental.IsExperimentalModeEnabled()), Secure: r.Spec.TLS != nil}, + Spec: URLSpec{Host: r.Spec.Host, Port: r.Spec.Port.TargetPort.IntValue(), Protocol: GetProtocol(r, iextensionsv1.Ingress{}), Secure: r.Spec.TLS != nil, Kind: envinfo.ROUTE}, } } @@ -650,24 +792,65 @@ func getMachineReadableFormatForList(urls []URL) URLList { } } -func getMachineReadableFormatIngress(i iextensionsv1.Ingress) iextensionsv1.Ingress { - return iextensionsv1.Ingress{ - TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "extensions/v1beta1"}, +func getMachineReadableFormatIngress(i iextensionsv1.Ingress) URL { + url := URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: apiVersion}, ObjectMeta: metav1.ObjectMeta{Name: i.Labels[urlLabels.URLLabel]}, - Spec: iextensionsv1.IngressSpec{TLS: i.Spec.TLS, Rules: i.Spec.Rules}, + Spec: URLSpec{Host: i.Spec.Rules[0].Host, Port: int(i.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal), Secure: i.Spec.TLS != nil, Kind: envinfo.INGRESS}, + } + if i.Spec.TLS != nil { + url.Spec.TLSSecret = i.Spec.TLS[0].SecretName } + return url } -func getMachineReadableFormatForIngressList(ingresses []iextensionsv1.Ingress) 
iextensionsv1.IngressList { - return iextensionsv1.IngressList{ +// ConvertIngressURLToIngress converts IngressURL to Ingress +func ConvertIngressURLToIngress(ingressURL URL, serviceName string) iextensionsv1.Ingress { + port := intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(ingressURL.Spec.Port), + } + ingress := iextensionsv1.Ingress{ TypeMeta: metav1.TypeMeta{ - Kind: "List", - APIVersion: apiVersion, + Kind: "Ingress", + APIVersion: "extensions/v1beta1", }, - ListMeta: metav1.ListMeta{}, - Items: ingresses, + ObjectMeta: metav1.ObjectMeta{ + Name: ingressURL.Name, + }, + Spec: iextensionsv1.IngressSpec{ + Rules: []iextensionsv1.IngressRule{ + { + Host: ingressURL.Spec.Host, + IngressRuleValue: iextensionsv1.IngressRuleValue{ + HTTP: &iextensionsv1.HTTPIngressRuleValue{ + Paths: []iextensionsv1.HTTPIngressPath{ + { + Path: "/", + Backend: iextensionsv1.IngressBackend{ + ServiceName: serviceName, + ServicePort: port, + }, + }, + }, + }, + }, + }, + }, + }, + } + if len(ingressURL.Spec.TLSSecret) > 0 { + ingress.Spec.TLS = []iextensionsv1.IngressTLS{ + { + Hosts: []string{ + ingressURL.Spec.Host, + }, + SecretName: ingressURL.Spec.TLSSecret, + }, + } } + return ingress } func getMachineReadableFormatDocker(internalPort int, externalPort int, hostIP string, urlName string) URL { @@ -702,8 +885,8 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam Host: url.Host, Port: url.Port, Secure: url.Secure, - tLSSecret: url.TLSSecret, - urlKind: url.Kind, + TLSSecret: url.TLSSecret, + Kind: url.Kind, }, } } @@ -713,9 +896,9 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam for _, url := range urls { urlLOCAL[url.Name] = URL{ Spec: URLSpec{ - Port: url.Port, - Secure: url.Secure, - urlKind: envinfo.ROUTE, + Port: url.Port, + Secure: url.Secure, + Kind: envinfo.ROUTE, }, } } @@ -730,9 +913,10 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam for _, url := range 
urlList.Items { urlCLUSTER[url.Name] = URL{ Spec: URLSpec{ - Host: url.Spec.Rules[0].Host, - Port: int(url.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal), - urlKind: envinfo.INGRESS, + Host: url.Spec.Host, + Port: url.Spec.Port, + Kind: envinfo.INGRESS, + Secure: url.Spec.Secure, }, } } @@ -746,8 +930,9 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam for _, urlRoute := range urlPushedRoutes.Items { urlCLUSTER[urlRoute.Name] = URL{ Spec: URLSpec{ - Port: urlRoute.Spec.Port, - urlKind: envinfo.ROUTE, + Port: urlRoute.Spec.Port, + Kind: envinfo.ROUTE, + Secure: urlRoute.Spec.Secure, }, } } @@ -764,7 +949,7 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam if ok { // since the host stored in an ingress // is the combination of name and host of the url - if val.Spec.urlKind == envinfo.INGRESS { + if val.Spec.Kind == envinfo.INGRESS { val.Spec.Host = fmt.Sprintf("%v.%v", urlName, val.Spec.Host) } if !reflect.DeepEqual(val.Spec, urlSpec.Spec) { @@ -774,11 +959,11 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam } if !ok || configMismatch { - if urlSpec.Spec.urlKind == envinfo.INGRESS && kClient == nil { + if urlSpec.Spec.Kind == envinfo.INGRESS && kClient == nil { continue } // delete the url - err := Delete(client, kClient, urlName, parameters.ApplicationName, urlSpec.Spec.urlKind) + err := Delete(client, kClient, urlName, parameters.ApplicationName, urlSpec.Spec.Kind) if err != nil { return err } @@ -793,7 +978,7 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam for urlName, urlInfo := range urlLOCAL { _, ok := urlCLUSTER[urlName] if !ok { - if urlInfo.Spec.urlKind == envinfo.INGRESS && kClient == nil { + if urlInfo.Spec.Kind == envinfo.INGRESS && kClient == nil { continue } @@ -804,8 +989,8 @@ func Push(client *occlient.Client, kClient *kclient.Client, parameters PushParam componentName: parameters.ComponentName, 
applicationName: parameters.ApplicationName, host: urlInfo.Spec.Host, - secretName: urlInfo.Spec.tLSSecret, - urlKind: urlInfo.Spec.urlKind, + secretName: urlInfo.Spec.TLSSecret, + urlKind: urlInfo.Spec.Kind, } host, err := Create(client, kClient, createParameters, parameters.IsRouteSupported, parameters.IsExperimentalModeEnabled) if err != nil { diff --git a/pkg/url/url_test.go b/pkg/url/url_test.go index 1b1d5467b0a..59cfc2f7b47 100644 --- a/pkg/url/url_test.go +++ b/pkg/url/url_test.go @@ -790,9 +790,9 @@ func TestPush(t *testing.T) { Name: "example-app", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 8080, + Secure: false, + Kind: envinfo.ROUTE, }, }, { @@ -800,9 +800,9 @@ func TestPush(t *testing.T) { Name: "example-1-app", }, Spec: URLSpec{ - Port: 9090, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 9090, + Secure: false, + Kind: envinfo.ROUTE, }, }, }, @@ -846,9 +846,9 @@ func TestPush(t *testing.T) { Name: "example-local-0-app", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 8080, + Secure: false, + Kind: envinfo.ROUTE, }, }, { @@ -856,9 +856,9 @@ func TestPush(t *testing.T) { Name: "example-local-1-app", }, Spec: URLSpec{ - Port: 9090, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 9090, + Secure: false, + Kind: envinfo.ROUTE, }, }, }, @@ -921,10 +921,10 @@ func TestPush(t *testing.T) { Name: "example", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - Host: "com", - urlKind: envinfo.INGRESS, + Port: 8080, + Secure: false, + Host: "com", + Kind: envinfo.INGRESS, }, }, { @@ -932,10 +932,10 @@ func TestPush(t *testing.T) { Name: "example-1", }, Spec: URLSpec{ - Port: 9090, - Secure: false, - Host: "com", - urlKind: envinfo.INGRESS, + Port: 9090, + Secure: false, + Host: "com", + Kind: envinfo.INGRESS, }, }, }, @@ -988,10 +988,10 @@ func TestPush(t *testing.T) { Name: "example-local-0", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - Host: "com", - urlKind: envinfo.INGRESS, + 
Port: 8080, + Secure: false, + Host: "com", + Kind: envinfo.INGRESS, }, }, { @@ -999,10 +999,10 @@ func TestPush(t *testing.T) { Name: "example-local-1", }, Spec: URLSpec{ - Port: 9090, - Secure: false, - Host: "com", - urlKind: envinfo.INGRESS, + Port: 9090, + Secure: false, + Host: "com", + Kind: envinfo.INGRESS, }, }, }, @@ -1071,9 +1071,9 @@ func TestPush(t *testing.T) { Name: "example-local-0", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 8080, + Secure: false, + Kind: envinfo.ROUTE, }, }, { @@ -1081,10 +1081,10 @@ func TestPush(t *testing.T) { Name: "example-local-1", }, Spec: URLSpec{ - Port: 9090, - Secure: false, - Host: "com", - urlKind: envinfo.INGRESS, + Port: 9090, + Secure: false, + Host: "com", + Kind: envinfo.INGRESS, }, }, }, @@ -1126,8 +1126,8 @@ func TestPush(t *testing.T) { Port: 8080, Secure: true, Host: "com", - tLSSecret: "secret", - urlKind: envinfo.INGRESS, + TLSSecret: "secret", + Kind: envinfo.INGRESS, }, }, }, @@ -1157,9 +1157,9 @@ func TestPush(t *testing.T) { Name: "example-local-0", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 8080, + Secure: false, + Kind: envinfo.ROUTE, }, }, }, @@ -1196,9 +1196,9 @@ func TestPush(t *testing.T) { Name: "example-local-0-app", }, Spec: URLSpec{ - Port: 8080, - Secure: false, - urlKind: envinfo.ROUTE, + Port: 8080, + Secure: false, + Kind: envinfo.ROUTE, }, }, }, @@ -1232,9 +1232,9 @@ func TestPush(t *testing.T) { Name: "example", }, Spec: URLSpec{ - Port: 8080, - Secure: true, - urlKind: envinfo.ROUTE, + Port: 8080, + Secure: true, + Kind: envinfo.ROUTE, }, }, }, @@ -1260,10 +1260,10 @@ func TestPush(t *testing.T) { Name: "example", }, Spec: URLSpec{ - Port: 8080, - Secure: true, - Host: "com", - urlKind: envinfo.INGRESS, + Port: 8080, + Secure: true, + Host: "com", + Kind: envinfo.INGRESS, }, }, }, @@ -1293,8 +1293,8 @@ func TestPush(t *testing.T) { Port: 8080, Secure: true, Host: "com", - tLSSecret: "secret", - urlKind: 
envinfo.INGRESS, + TLSSecret: "secret", + Kind: envinfo.INGRESS, }, }, }, @@ -1392,13 +1392,13 @@ func TestPush(t *testing.T) { if createdObject.Name == url.Name && (createdObject.Spec.TLS != nil) == url.Spec.Secure && int(createdObject.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.IntVal) == url.Spec.Port && - envinfo.INGRESS == url.Spec.urlKind && + envinfo.INGRESS == url.Spec.Kind && fmt.Sprintf("%v.%v", url.Name, url.Spec.Host) == createdObject.Spec.Rules[0].Host { if url.Spec.Secure { secretName := tt.componentName + "-tlssecret" - if url.Spec.tLSSecret != "" { - secretName = url.Spec.tLSSecret + if url.Spec.TLSSecret != "" { + secretName = url.Spec.TLSSecret } if createdObject.Spec.TLS[0].SecretName == secretName { found = true @@ -1422,7 +1422,7 @@ func TestPush(t *testing.T) { if createdObject.Name == url.Name && (createdObject.Spec.TLS != nil) == url.Spec.Secure && int(createdObject.Spec.Port.TargetPort.IntVal) == url.Spec.Port && - envinfo.ROUTE == url.Spec.urlKind { + envinfo.ROUTE == url.Spec.Kind { found = true break } @@ -1637,3 +1637,571 @@ func TestGetContainerURL(t *testing.T) { }) } } + +func TestListIngressAndRoute(t *testing.T) { + componentName := "testcomponent" + + testURL1 := envinfo.EnvInfoURL{Name: "example-0", Port: 8080, Host: "com", Kind: "ingress"} + testURL2 := envinfo.EnvInfoURL{Name: "example-1", Port: 9090, Host: "com", Kind: "ingress"} + testURL3 := envinfo.EnvInfoURL{Name: "ingressurl3", Port: 8080, Host: "com", Secure: true, Kind: "ingress"} + testURL4 := envinfo.EnvInfoURL{Name: "example", Port: 8080, Kind: "route"} + testURL5 := envinfo.EnvInfoURL{Name: "routeurl2", Port: 8080, Kind: "route"} + testURL6 := envinfo.EnvInfoURL{Name: "routeurl3", Port: 8080, Kind: "route"} + + tests := []struct { + name string + component string + envURLs []envinfo.EnvInfoURL + routeSupported bool + routeList *routev1.RouteList + ingressList *extensionsv1.IngressList + wantURLs []URL + }{ + { + name: "Should retrieve the URL list with both 
ingress and routes", + component: componentName, + envURLs: []envinfo.EnvInfoURL{testURL2, testURL3, testURL4, testURL5}, + routeSupported: true, + ingressList: fake.GetIngressListWithMultiple(componentName), + routeList: &routev1.RouteList{ + Items: []routev1.Route{ + testingutil.GetSingleRoute(testURL4.Name, testURL4.Port, componentName, ""), + testingutil.GetSingleRoute(testURL6.Name, testURL6.Port, componentName, ""), + }, + }, + wantURLs: []URL{ + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL1.Name}, + Spec: URLSpec{Host: "example-0.com", Port: testURL1.Port, Secure: testURL1.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL2.Name}, + Spec: URLSpec{Host: "example-1.com", Port: testURL2.Port, Secure: testURL2.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL3.Name}, + Spec: URLSpec{Host: "ingressurl3.com", Port: testURL3.Port, Secure: testURL3.Secure, TLSSecret: componentName + "-tlssecret", Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL4.Name}, + Spec: URLSpec{Protocol: "http", Port: testURL4.Port, Secure: testURL4.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL5.Name}, + Spec: URLSpec{Port: testURL5.Port, Secure: testURL5.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + URL{ + 
TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL6.Name}, + Spec: URLSpec{Protocol: "http", Port: testURL6.Port, Secure: testURL6.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + }, + }, + { + name: "Should retrieve only ingress URLs with routeSupported equals to false", + component: componentName, + envURLs: []envinfo.EnvInfoURL{testURL2, testURL3, testURL4, testURL5}, + routeList: &routev1.RouteList{}, + ingressList: fake.GetIngressListWithMultiple(componentName), + routeSupported: false, + wantURLs: []URL{ + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL1.Name}, + Spec: URLSpec{Host: "example-0.com", Port: testURL1.Port, Secure: testURL1.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL2.Name}, + Spec: URLSpec{Host: "example-1.com", Port: testURL2.Port, Secure: testURL2.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL3.Name}, + Spec: URLSpec{Host: "ingressurl3.com", Port: testURL3.Port, Secure: testURL3.Secure, TLSSecret: componentName + "-tlssecret", Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + }, + }, + { + name: "Should retrieve only ingress URLs", + component: componentName, + envURLs: []envinfo.EnvInfoURL{testURL2, testURL3}, + routeSupported: true, + routeList: &routev1.RouteList{}, + ingressList: fake.GetIngressListWithMultiple(componentName), + wantURLs: []URL{ + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: 
testURL1.Name}, + Spec: URLSpec{Host: "example-0.com", Port: testURL1.Port, Secure: testURL1.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL2.Name}, + Spec: URLSpec{Host: "example-1.com", Port: testURL2.Port, Secure: testURL2.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL3.Name}, + Spec: URLSpec{Host: "ingressurl3.com", Port: testURL3.Port, Secure: testURL3.Secure, TLSSecret: componentName + "-tlssecret", Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + }, + }, + { + name: "Should retrieve only route URLs", + component: componentName, + envURLs: []envinfo.EnvInfoURL{testURL4, testURL5}, + routeSupported: true, + routeList: &routev1.RouteList{ + Items: []routev1.Route{ + testingutil.GetSingleRoute(testURL4.Name, testURL4.Port, componentName, ""), + testingutil.GetSingleRoute(testURL6.Name, testURL6.Port, componentName, ""), + }, + }, + ingressList: &extensionsv1.IngressList{}, + wantURLs: []URL{ + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL4.Name}, + Spec: URLSpec{Protocol: "http", Port: testURL4.Port, Secure: testURL4.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL5.Name}, + Spec: URLSpec{Port: testURL5.Port, Secure: testURL5.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL6.Name}, + 
Spec: URLSpec{Protocol: "http", Port: testURL6.Port, Secure: testURL6.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // initialising virtual envinfo + esi := &envinfo.EnvSpecificInfo{} + for _, url := range tt.envURLs { + err := esi.SetConfiguration("url", url) + if err != nil { + // discard the error, since no physical file to write + t.Log("Expected error since no physical env file to write") + } + } + // initialising the fakeclient + fkclient, fkclientset := kclient.FakeNew() + fkclient.Namespace = "default" + fkclientset.Kubernetes.PrependReactor("list", "ingresses", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, tt.ingressList, nil + }) + fakeoclient, fakeoclientSet := occlient.FakeNew() + fakeoclientSet.RouteClientset.PrependReactor("list", "routes", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, tt.routeList, nil + }) + + urls, err := ListIngressAndRoute(fakeoclient, fkclient, esi, componentName, tt.routeSupported) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + if len(urls.Items) != len(tt.wantURLs) { + t.Errorf("numbers of url listed does not match, expected %v, got %v", len(tt.wantURLs), len(urls.Items)) + } + actualURLMap := make(map[string]URL) + for _, actualURL := range urls.Items { + actualURLMap[actualURL.Name] = actualURL + } + for _, wantURL := range tt.wantURLs { + if !reflect.DeepEqual(actualURLMap[wantURL.Name], wantURL) { + t.Errorf("Expected %v, got %v", wantURL, actualURLMap[wantURL.Name]) + } + } + }) + } + +} + +func TestGetIngressOrRoute(t *testing.T) { + componentName := "testcomponent" + + testURL1 := envinfo.EnvInfoURL{Name: "ingressurl1", Port: 8080, Host: "com", Kind: "ingress"} + testURL2 := envinfo.EnvInfoURL{Name: "ingressurl2", Port: 8080, Host: "com", Kind: "ingress"} + testURL3 := envinfo.EnvInfoURL{Name: "ingressurl3", Port: 
8080, Host: "com", Secure: true, Kind: "ingress"} + testURL4 := envinfo.EnvInfoURL{Name: "example", Port: 8080, Kind: "route"} + testURL5 := envinfo.EnvInfoURL{Name: "routeurl2", Port: 8080, Kind: "route"} + testURL6 := envinfo.EnvInfoURL{Name: "routeurl3", Port: 8080, Kind: "route"} + esi := &envinfo.EnvSpecificInfo{} + err := esi.SetConfiguration("url", testURL2) + if err != nil { + // discard the error, since no physical file to write + t.Log("Expected error since no physical env file to write") + } + err = esi.SetConfiguration("url", testURL3) + if err != nil { + // discard the error, since no physical file to write + t.Log("Expected error since no physical env file to write") + } + err = esi.SetConfiguration("url", testURL4) + if err != nil { + // discard the error, since no physical file to write + t.Log("Expected error since no physical env file to write") + } + err = esi.SetConfiguration("url", testURL5) + if err != nil { + // discard the error, since no physical file to write + t.Log("Expected error since no physical env file to write") + } + + tests := []struct { + name string + component string + urlName string + routeSupported bool + pushedIngress *extensionsv1.Ingress + pushedRoute routev1.Route + wantURL URL + wantErr bool + }{ + { + name: "Case 1: Successfully retrieve the locally deleted Ingress URL object", + component: componentName, + urlName: testURL1.Name, + routeSupported: true, + pushedIngress: fake.GetSingleIngress(testURL1.Name, componentName), + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL1.Name}, + Spec: URLSpec{Host: "ingressurl1.com", Port: testURL1.Port, Secure: testURL1.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + wantErr: false, + }, + { + name: "Case 2: Successfully retrieve the pushed Ingress URL object", + component: componentName, + urlName: testURL2.Name, + 
routeSupported: true, + pushedIngress: fake.GetSingleIngress(testURL2.Name, componentName), + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL2.Name}, + Spec: URLSpec{Host: "ingressurl2.com", Port: testURL2.Port, Secure: testURL2.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + wantErr: false, + }, + { + name: "Case 3: Successfully retrieve the not pushed Ingress URL object", + component: componentName, + urlName: testURL3.Name, + routeSupported: true, + pushedIngress: nil, + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL3.Name}, + Spec: URLSpec{Host: "ingressurl3.com", Port: testURL3.Port, Secure: testURL3.Secure, TLSSecret: componentName + "-tlssecret", Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + wantErr: false, + }, + { + name: "Case 4: Should show error if the url does not exist", + component: componentName, + urlName: "notExistURL", + routeSupported: true, + pushedIngress: nil, + pushedRoute: routev1.Route{}, + wantErr: true, + }, + { + name: "Case 4: Successfully retrieve the pushed Route URL object", + component: componentName, + urlName: testURL4.Name, + routeSupported: true, + pushedIngress: nil, + pushedRoute: testingutil.GetSingleRoute(testURL4.Name, testURL4.Port, componentName, ""), + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL4.Name}, + Spec: URLSpec{Protocol: "http", Port: testURL4.Port, Secure: testURL4.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + wantErr: false, + }, + { + name: "Case 5: Successfully retrieve the not pushed Route URL object", + component: componentName, + urlName: testURL5.Name, + 
routeSupported: true, + pushedIngress: nil, + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL5.Name}, + Spec: URLSpec{Port: testURL5.Port, Secure: testURL5.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + wantErr: false, + }, + { + name: "Case 6: Successfully retrieve the locally deleted Route URL object", + component: componentName, + urlName: testURL6.Name, + routeSupported: true, + pushedIngress: nil, + pushedRoute: testingutil.GetSingleRoute(testURL6.Name, testURL6.Port, componentName, ""), + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL6.Name}, + Spec: URLSpec{Protocol: "http", Port: testURL6.Port, Secure: testURL6.Secure, Kind: envinfo.ROUTE}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + wantErr: false, + }, + { + name: "Case 7: If route is not supported, should show error and empty URL when describing a route", + component: componentName, + urlName: testURL5.Name, + routeSupported: false, + pushedIngress: nil, + pushedRoute: routev1.Route{}, + wantURL: URL{}, + wantErr: true, + }, + { + name: "Case 8: If route is not supported, should retrieve not pushed ingress", + component: componentName, + urlName: testURL3.Name, + routeSupported: false, + pushedIngress: nil, + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL3.Name}, + Spec: URLSpec{Host: "ingressurl3.com", Port: testURL3.Port, Secure: testURL3.Secure, TLSSecret: componentName + "-tlssecret", Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeNotPushed, + }, + }, + wantErr: false, + }, + { + name: "Case 9: If route is not supported, should retrieve pushed ingress", + component: componentName, + urlName: 
testURL2.Name, + routeSupported: false, + pushedIngress: fake.GetSingleIngress(testURL2.Name, componentName), + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL2.Name}, + Spec: URLSpec{Host: "ingressurl2.com", Port: testURL2.Port, Secure: testURL2.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypePushed, + }, + }, + wantErr: false, + }, + { + name: "Case 10: If route is not supported, should retrieve locally deleted ingress", + component: componentName, + urlName: testURL1.Name, + routeSupported: false, + pushedIngress: fake.GetSingleIngress(testURL1.Name, componentName), + pushedRoute: routev1.Route{}, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: testURL1.Name}, + Spec: URLSpec{Host: "ingressurl1.com", Port: testURL1.Port, Secure: testURL1.Secure, Kind: envinfo.INGRESS}, + Status: URLStatus{ + State: StateTypeLocallyDeleted, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fkclient, fkclientset := kclient.FakeNew() + fkclient.Namespace = "default" + if tt.pushedIngress != nil { + fkclientset.Kubernetes.PrependReactor("get", "ingresses", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, tt.pushedIngress, nil + }) + } + client, fakeClientSet := occlient.FakeNew() + if !reflect.DeepEqual(tt.pushedRoute, routev1.Route{}) { + fakeClientSet.RouteClientset.PrependReactor("get", "routes", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, &tt.pushedRoute, nil + }) + } + url, err := GetIngressOrRoute(client, fkclient, esi, tt.urlName, tt.component, tt.routeSupported) + if !tt.wantErr == (err != nil) { + t.Errorf("unexpected error %v", err) + } + if !reflect.DeepEqual(url, tt.wantURL) { + t.Errorf("Expected %v, got %v", tt.wantURL, url) + } + }) + } 
+} + +func TestConvertEnvinfoURL(t *testing.T) { + serviceName := "testService" + urlName := "testURL" + host := "com" + secretName := "test-tls-secret" + tests := []struct { + name string + envInfoURL envinfo.EnvInfoURL + wantURL URL + }{ + { + name: "Case 1: insecure URL", + envInfoURL: envinfo.EnvInfoURL{ + Name: urlName, + Host: host, + Port: 8080, + Secure: false, + Kind: envinfo.INGRESS, + }, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: urlName}, + Spec: URLSpec{Host: fmt.Sprintf("%s.%s", urlName, host), Port: 8080, Secure: false, Kind: envinfo.INGRESS}, + }, + }, + { + name: "Case 2: secure Ingress URL without tls secret defined", + envInfoURL: envinfo.EnvInfoURL{ + Name: urlName, + Host: host, + Port: 8080, + Secure: true, + Kind: envinfo.INGRESS, + }, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: urlName}, + Spec: URLSpec{Host: fmt.Sprintf("%s.%s", urlName, host), Port: 8080, Secure: true, TLSSecret: fmt.Sprintf("%s-tlssecret", serviceName), Kind: envinfo.INGRESS}, + }, + }, + { + name: "Case 3: secure Ingress URL with tls secret defined", + envInfoURL: envinfo.EnvInfoURL{ + Name: urlName, + Host: host, + Port: 8080, + Secure: true, + TLSSecret: secretName, + Kind: envinfo.INGRESS, + }, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: urlName}, + Spec: URLSpec{Host: fmt.Sprintf("%s.%s", urlName, host), Port: 8080, Secure: true, TLSSecret: secretName, Kind: envinfo.INGRESS}, + }, + }, + { + name: "Case 4: Insecure route URL", + envInfoURL: envinfo.EnvInfoURL{ + Name: urlName, + Port: 8080, + Kind: envinfo.ROUTE, + }, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: urlName}, + Spec: URLSpec{Port: 8080, Secure: false, Kind: envinfo.ROUTE}, + 
}, + }, + { + name: "Case 4: Secure route URL", + envInfoURL: envinfo.EnvInfoURL{ + Name: urlName, + Port: 8080, + Secure: true, + Kind: envinfo.ROUTE, + }, + wantURL: URL{ + TypeMeta: metav1.TypeMeta{Kind: "url", APIVersion: "odo.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: urlName}, + Spec: URLSpec{Port: 8080, Secure: true, Kind: envinfo.ROUTE}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + url := ConvertEnvinfoURL(tt.envInfoURL, serviceName) + if !reflect.DeepEqual(url, tt.wantURL) { + t.Errorf("Expected %v, got %v", tt.wantURL, url) + } + }) + } +} diff --git a/pkg/util/concurrent.go b/pkg/util/concurrent.go new file mode 100644 index 00000000000..9674850d33c --- /dev/null +++ b/pkg/util/concurrent.go @@ -0,0 +1,76 @@ +package util + +import ( + "sync" +) + +// A task to execute in a go-routine +type ConcurrentTask struct { + ToRun func(errChannel chan error) +} + +// run encapsulates the work to be done by calling the ToRun function +func (ct ConcurrentTask) run(errChannel chan error, wg *sync.WaitGroup) { + defer wg.Done() + ct.ToRun(errChannel) +} + +// Records tasks to be run concurrently with go-routines +type ConcurrentTasks struct { + tasks []ConcurrentTask +} + +// NewConcurrentTasks creates a new ConcurrentTasks instance, dimensioned to accept at least the specified number of tasks +func NewConcurrentTasks(taskNumber int) *ConcurrentTasks { + return &ConcurrentTasks{tasks: make([]ConcurrentTask, 0, taskNumber)} +} + +// Add adds the specified ConcurrentTask to the list of tasks to be run concurrently +func (ct *ConcurrentTasks) Add(task ConcurrentTask) { + if len(ct.tasks) == 0 { + ct.tasks = make([]ConcurrentTask, 0, 7) + } + ct.tasks = append(ct.tasks, task) +} + +// Run concurrently runs the added tasks failing on the first error +// Based on https://garrypolley.com/2016/02/10/golang-routines-errors/ +func (ct *ConcurrentTasks) Run() error { + var wg sync.WaitGroup + finished := make(chan bool, 1) // 
this along with wg.Wait() is why the error handling works and doesn't deadlock + errChannel := make(chan error) + + for _, task := range ct.tasks { + wg.Add(1) + go task.run(errChannel, &wg) + } + + // Put the wait group in a go routine. + // By putting the wait group in the go routine we ensure either all pass + // and we close the "finished" channel or we wait forever for the wait group + // to finish. + // + // Waiting forever is okay because of the blocking select below. + go func() { + wg.Wait() + close(finished) + }() + + // This select will block until one of the two channels returns a value. + // This means on the first failure in the go routines above the errChannel will release a + // value first. Because there is a "return" statement in the err check this function will + // exit when an error occurs. + // + // Due to the blocking on wg.Wait() the finished channel will not get a value unless all + // the go routines before were successful because not all the wg.Done() calls would have + // happened. 
+ select { + case <-finished: + case err := <-errChannel: + if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/util/util.go b/pkg/util/util.go index 2c7746a9749..98e32e0d021 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -4,10 +4,11 @@ import ( "archive/zip" "bufio" "context" + "crypto/rand" "fmt" "io" "io/ioutil" - "math/rand" + "math/big" "net" "net/http" "net/url" @@ -81,10 +82,13 @@ func ConvertLabelsToSelector(labels map[string]string) string { // GenerateRandomString generates a random string of lower case characters of // the given size func GenerateRandomString(n int) string { - rand.Seed(time.Now().UnixNano()) b := make([]rune, n) + for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] + // this error is ignored because it fails only when the 2nd arg of Int() is less then 0 + // which wont happen + n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letterRunes)))) + b[i] = letterRunes[n.Int64()] } return string(b) } @@ -693,6 +697,11 @@ func HTTPGetRequest(url string) ([]byte, error) { } defer resp.Body.Close() + // we have a non 1xx / 2xx status, return an error + if (resp.StatusCode - 300) > 0 { + return nil, fmt.Errorf("error retrieving %s: %s", url, http.StatusText(resp.StatusCode)) + } + bytes, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err @@ -807,8 +816,9 @@ func GetGitHubZipURL(repoURL string) (string, error) { } // GetAndExtractZip downloads a zip file from a URL with a http prefix or -// takes an absolute path prefixed with file:// and extracts it to a destination -func GetAndExtractZip(zipURL string, destination string) error { +// takes an absolute path prefixed with file:// and extracts it to a destination. 
+// pathToUnzip specifies the path within the zip folder to extract +func GetAndExtractZip(zipURL string, destination string, pathToUnzip string) error { if zipURL == "" { return errors.Errorf("Empty zip url: %s", zipURL) } @@ -842,18 +852,23 @@ func GetAndExtractZip(zipURL string, destination string) error { return errors.Errorf("Invalid Zip URL: %s . Should either be prefixed with file://, http:// or https://", zipURL) } - _, err := Unzip(pathToZip, destination) + filenames, err := Unzip(pathToZip, destination, pathToUnzip) if err != nil { return err } + if len(filenames) == 0 { + return errors.New("no files were unzipped, ensure that the project repo is not empty or that sparseCheckoutDir has a valid path") + } + return nil } -// Unzip will decompress a zip archive, moving all files and folders -// within the zip file (parameter 1) to an output directory (parameter 2). +// Unzip will decompress a zip archive, moving specified files and folders +// within the zip file (parameter 1) to an output directory (parameter 2) // Source: https://golangcode.com/unzip-files-in-go/ -func Unzip(src, dest string) ([]string, error) { +// pathToUnzip (parameter 3) is the path within the zip folder to extract +func Unzip(src, dest, pathToUnzip string) ([]string, error) { var filenames []string r, err := zip.OpenReader(src) @@ -862,16 +877,46 @@ func Unzip(src, dest string) ([]string, error) { } defer r.Close() - for _, f := range r.File { + // change path separator to correct character + pathToUnzip = filepath.FromSlash(pathToUnzip) + for _, f := range r.File { // Store filename/path for returning and using later on - index := strings.Index(f.Name, "/") + index := strings.Index(f.Name, string(os.PathSeparator)) filename := f.Name[index+1:] if filename == "" { continue } + + // if sparseCheckoutDir has a pattern + match, err := filepath.Match(pathToUnzip, filename) + if err != nil { + return filenames, err + } + + // removes first slash of pathToUnzip if present, adds trailing 
slash + pathToUnzip = strings.TrimPrefix(pathToUnzip, string(os.PathSeparator)) + if pathToUnzip != "" && !strings.HasSuffix(pathToUnzip, string(os.PathSeparator)) { + pathToUnzip = pathToUnzip + string(os.PathSeparator) + } + // destination filepath before trim fpath := filepath.Join(dest, filename) + // used for pattern matching + fpathDir := filepath.Dir(fpath) + + // check for prefix or match + if strings.HasPrefix(filename, pathToUnzip) { + filename = strings.TrimPrefix(filename, pathToUnzip) + } else if !strings.HasPrefix(filename, pathToUnzip) && !match && !sliceContainsString(fpathDir, filenames) { + continue + } + // adds trailing slash to destination if needed as filepath.Join removes it + if (len(filename) == 1 && os.IsPathSeparator(filename[0])) || filename == "" { + fpath = dest + string(os.PathSeparator) + } else { + fpath = filepath.Join(dest, filename) + } // Check for ZipSlip. More Info: http://bit.ly/2MsjAWE if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) { return filenames, fmt.Errorf("%s: illegal file path", fpath) @@ -966,7 +1011,7 @@ func ValidateK8sResourceName(key string, value string) error { _, err2 := strconv.ParseFloat(value, 64) if err1 != nil || err2 == nil { - return errors.Errorf("%s is not valid, %s should conform the following requirements: %s", key, key, requirements) + return errors.Errorf("%s \"%s\" is not valid, %s should conform the following requirements: %s", key, value, key, requirements) } return nil @@ -1004,3 +1049,72 @@ func ValidateURL(sourceURL string) error { return nil } + +// ValidateFile validates the file +func ValidateFile(filePath string) error { + // Check if the file path exist + file, err := os.Stat(filePath) + if err != nil { + return err + } + + if file.IsDir() { + return errors.Errorf("%s exists but it's not a file", filePath) + } + + return nil +} + +// CopyFile copies file from source path to destination path +func CopyFile(srcPath string, dstPath string, info os.FileInfo) 
error { + // Check if the source file path exists + err := ValidateFile(srcPath) + if err != nil { + return err + } + + // Open source file + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() // #nosec G307 + + // Create destination file + dstFile, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstFile.Close() // #nosec G307 + + // Ensure destination file has the same file mode with source file + err = os.Chmod(dstFile.Name(), info.Mode()) + if err != nil { + return err + } + + // Copy file + _, err = io.Copy(dstFile, srcFile) + if err != nil { + return err + } + + return nil +} + +// PathEqual compare the paths to determine if they are equal +func PathEqual(firstPath string, secondPath string) bool { + firstAbsPath, _ := GetAbsPath(firstPath) + secondAbsPath, _ := GetAbsPath(secondPath) + return firstAbsPath == secondAbsPath +} + +// sliceContainsString checks for existence of given string in given slice +func sliceContainsString(str string, slice []string) bool { + for _, b := range slice { + if b == str { + return true + } + } + return false +} diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 1dae55a141f..addb9aff043 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -1652,3 +1652,188 @@ func TestValidateURL(t *testing.T) { }) } } + +func TestValidateFile(t *testing.T) { + // Create temp dir and temp file + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("Failed to create temp dir: %s, error: %v", tempDir, err) + } + tempFile, err := ioutil.TempFile(tempDir, "") + if err != nil { + t.Errorf("Failed to create temp file: %s, error: %v", tempFile.Name(), err) + } + defer tempFile.Close() + + tests := []struct { + name string + filePath string + wantErr bool + }{ + { + name: "Case 1: Valid file path", + filePath: tempFile.Name(), + wantErr: false, + }, + { + name: "Case 2: Invalid file path", + filePath: "!@#", + wantErr: true, + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + gotErr := false + err := ValidateFile(tt.filePath) + if err != nil { + gotErr = true + } + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("Got error: %t, want error: %t", gotErr, tt.wantErr) + } + }) + } +} + +func TestCopyFile(t *testing.T) { + // Create temp dir + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("Failed to create temp dir: %s, error: %v", tempDir, err) + } + + // Create temp file under temp dir as source file + tempFile, err := ioutil.TempFile(tempDir, "") + if err != nil { + t.Errorf("Failed to create temp file: %s, error: %v", tempFile.Name(), err) + } + defer tempFile.Close() + + srcPath := tempFile.Name() + fakePath := "!@#/**" + dstPath := filepath.Join(tempDir, "dstFile") + info, _ := os.Stat(srcPath) + + tests := []struct { + name string + srcPath string + dstPath string + wantErr bool + }{ + { + name: "Case 1: Copy successfully", + srcPath: srcPath, + dstPath: dstPath, + wantErr: false, + }, + { + name: "Case 2: Invalid source path", + srcPath: fakePath, + dstPath: dstPath, + wantErr: true, + }, + { + name: "Case 3: Invalid destination path", + srcPath: srcPath, + dstPath: fakePath, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotErr := false + err = CopyFile(tt.srcPath, tt.dstPath, info) + if err != nil { + gotErr = true + } + + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("Got error: %t, want error: %t", gotErr, tt.wantErr) + } + }) + } +} + +func TestPathEqual(t *testing.T) { + currentDir, err := os.Getwd() + if err != nil { + t.Errorf("Can't get absolute path of current working directory with error: %v", err) + } + fileAbsPath := filepath.Join(currentDir, "file") + fileRelPath := filepath.Join(".", "file") + + tests := []struct { + name string + firstPath string + secondPath string + want bool + }{ + { + name: "Case 1: Two paths (two absolute paths) are equal", + firstPath: fileAbsPath, + 
secondPath: fileAbsPath, + want: true, + }, + { + name: "Case 2: Two paths (one absolute path, one relative path) are equal", + firstPath: fileAbsPath, + secondPath: fileRelPath, + want: true, + }, + { + name: "Case 3: Two paths are not equal", + firstPath: fileAbsPath, + secondPath: filepath.Join(fileAbsPath, "file"), + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := PathEqual(tt.firstPath, tt.secondPath) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Got: %t, want %t", got, tt.want) + } + }) + } +} + +func TestSliceContainsString(t *testing.T) { + tests := []struct { + name string + stringVal string + slice []string + wantVal bool + }{ + { + name: "Case 1: string in valid slice", + stringVal: "string", + slice: []string{"string", "string2"}, + wantVal: true, + }, + { + name: "Case 2: string not in valid slice", + stringVal: "string3", + slice: []string{"string", "string2"}, + wantVal: false, + }, + { + name: "Case 3: string not in empty slice", + stringVal: "string", + slice: []string{}, + wantVal: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotVal := sliceContainsString(tt.stringVal, tt.slice) + + if !reflect.DeepEqual(gotVal, tt.wantVal) { + t.Errorf("Got %v, want %v", gotVal, tt.wantVal) + } + }) + } +} diff --git a/pkg/version/version.go b/pkg/version/version.go index 78e80cc80cd..daeff011038 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -12,7 +12,7 @@ Changing these values will change the versioning information when releasing odo. 
var ( // VERSION is version number that will be displayed when running ./odo version - VERSION = "v1.2.1" + VERSION = "v1.2.3" // GITCOMMIT is hash of the commit that will be displayed when running ./odo version // this will be overwritten when running build like this: go build -ldflags="-X github.com/openshift/odo/cmd.GITCOMMIT=$(GITCOMMIT)" diff --git a/pkg/watch/watch.go b/pkg/watch/watch.go index b2e68294f27..f6df05c5297 100644 --- a/pkg/watch/watch.go +++ b/pkg/watch/watch.go @@ -9,6 +9,7 @@ import ( "time" "github.com/openshift/odo/pkg/devfile/adapters/common" + "github.com/openshift/odo/pkg/envinfo" "github.com/openshift/odo/pkg/util" "github.com/openshift/odo/pkg/occlient" @@ -30,7 +31,7 @@ type WatchParameters struct { FileIgnores []string // Custom function that can be used to push detected changes to remote pod. For more info about what each of the parameters to this function, please refer, pkg/component/component.go#PushLocal WatchHandler func(*occlient.Client, string, string, string, io.Writer, []string, []string, bool, []string, bool) error - // Custom function that can be used to push detected changes to remote devfile pod. For more info about what each of the parameters to this function, please refer, pkg/component/component.go#PushLocal + // Custom function that can be used to push detected changes to remote devfile pod. 
For more info about what each of the parameters to this function, please refer, pkg/devfile/adapters/interface.go#PlatformAdapter DevfileWatchHandler func(common.PushParameters) error // This is a channel added to signal readiness of the watch command to the external channel listeners StartChan chan bool @@ -40,6 +41,14 @@ type WatchParameters struct { PushDiffDelay int // Parameter whether or not to show build logs Show bool + // EnvSpecificInfo contains infomation of env.yaml file + EnvSpecificInfo *envinfo.EnvSpecificInfo + // DevfileInitCmd takes the init command through the command line and overwrites devfile init command + DevfileInitCmd string + // DevfileBuildCmd takes the build command through the command line and overwrites devfile build command + DevfileBuildCmd string + // DevfileRunCmd takes the run command through the command line and overwrites devfile run command + DevfileRunCmd string } // addRecursiveWatch handles adding watches recursively for the path provided @@ -309,6 +318,10 @@ func WatchAndPush(client *occlient.Client, out io.Writer, parameters WatchParame WatchDeletedFiles: deletedPaths, IgnoredFiles: parameters.FileIgnores, ForceBuild: false, + DevfileInitCmd: parameters.DevfileInitCmd, + DevfileBuildCmd: parameters.DevfileBuildCmd, + DevfileRunCmd: parameters.DevfileRunCmd, + EnvSpecificInfo: *parameters.EnvSpecificInfo, } err = parameters.DevfileWatchHandler(pushParams) @@ -326,6 +339,10 @@ func WatchAndPush(client *occlient.Client, out io.Writer, parameters WatchParame WatchDeletedFiles: deletedPaths, IgnoredFiles: parameters.FileIgnores, ForceBuild: false, + DevfileInitCmd: parameters.DevfileInitCmd, + DevfileBuildCmd: parameters.DevfileBuildCmd, + DevfileRunCmd: parameters.DevfileRunCmd, + EnvSpecificInfo: *parameters.EnvSpecificInfo, } err = parameters.DevfileWatchHandler(pushParams) diff --git a/pkg/watch/watch_test.go b/pkg/watch/watch_test.go index 0d5f4ebfceb..b7d6c1e1a65 100644 --- a/pkg/watch/watch_test.go +++ 
b/pkg/watch/watch_test.go @@ -4,11 +4,6 @@ package watch import ( "fmt" - "github.com/openshift/odo/pkg/devfile/adapters/common" - "github.com/openshift/odo/pkg/occlient" - "github.com/openshift/odo/pkg/testingutil" - "github.com/openshift/odo/pkg/util" - "github.com/pkg/errors" "io" "os" "path/filepath" @@ -19,6 +14,13 @@ import ( "sync" "testing" "time" + + "github.com/openshift/odo/pkg/devfile/adapters/common" + "github.com/openshift/odo/pkg/envinfo" + "github.com/openshift/odo/pkg/occlient" + "github.com/openshift/odo/pkg/testingutil" + "github.com/openshift/odo/pkg/util" + "github.com/pkg/errors" ) // setUpF8AnalyticsComponentSrc sets up a mock analytics component source base for observing changes to source files. @@ -809,6 +811,7 @@ func TestWatchAndPush(t *testing.T) { if tt.isExperimental { watchParameters.DevfileWatchHandler = mockDevFilePush + watchParameters.EnvSpecificInfo = &envinfo.EnvSpecificInfo{} } else { watchParameters.ApplicationName = tt.applicationName watchParameters.WatchHandler = mockPushLocal diff --git a/scripts/bump-version.sh b/scripts/bump-version.sh index 7271a9a5046..450ef1b5d2c 100755 --- a/scripts/bump-version.sh +++ b/scripts/bump-version.sh @@ -30,10 +30,6 @@ echo "* Bumping version in scripts/rpm-prepare.sh" sed -i "s/\(ODO_VERSION:=\)[0-9]*\.[0-9]*\.[0-9]*/\1${NEW_VERSION}/g" scripts/rpm-prepare.sh check_version scripts/rpm-prepare.sh -echo "* Bumping version in scripts/installer.sh" -sed -i "s/\(LATEST_VERSION=\)\"v[0-9]*\.[0-9]*\.[0-9]*\(?:-\w+\)\?\"/\1\"v${NEW_VERSION}\"/g" scripts/installer.sh -check_version scripts/installer.sh - echo "* Bumping version in Dockerfile.rhel" sed -i "s/\(version=\)[0-9]*\.[0-9]*\.[0-9]*/\1${NEW_VERSION}/g" Dockerfile.rhel check_version Dockerfile.rhel diff --git a/scripts/changelog-script.sh b/scripts/changelog-script.sh index 924db9138f4..a57c16dd22b 100755 --- a/scripts/changelog-script.sh +++ b/scripts/changelog-script.sh @@ -28,6 +28,7 @@ To install odo, follow our installation guide at 
[docs.openshift.com]($INSTALLAT After each release, binaries are synced to [mirror.openshift.com]($MIRROR)" > /tmp/base github_changelog_generator \ +--max-issues 500 \ --user openshift \ --project odo \ -t $GITHUB_TOKEN \ diff --git a/scripts/installer.sh b/scripts/installer.sh deleted file mode 100755 index 098b0661721..00000000000 --- a/scripts/installer.sh +++ /dev/null @@ -1,297 +0,0 @@ -#!/bin/bash -set -e - -# The version of odo to install. Possible values - "master" and "latest" -# master - builds from git master branch -# latest - released versions specified by LATEST_VERSION variable -ODO_VERSION="latest" - -# Latest released odo version -LATEST_VERSION="v1.2.1" - -GITHUB_RELEASES_URL="https://github.com/openshift/odo/releases/download/${LATEST_VERSION}" -BINTRAY_URL="https://dl.bintray.com/odo/odo/latest" - -INSTALLATION_PATH="/usr/local/bin/" -PRIVILEGED_EXECUTION="sh -c" - -DEBIAN_GPG_PUBLIC_KEY="https://bintray.com/user/downloadSubjectPublicKey?username=bintray" -DEBIAN_MASTER_REPOSITORY="https://dl.bintray.com/odo/odo-deb-dev" -DEBIAN_LATEST_REPOSITORY="https://dl.bintray.com/odo/odo-deb-releases" - -RPM_MASTER_YUM_REPO="https://bintray.com/odo/odo-rpm-dev/rpm" -RPM_LATEST_YUM_REPO="https://bintray.com/odo/odo-rpm-releases/rpm" - -SUPPORTED_PLATFORMS=" -darwin-amd64 -linux-amd64 -linux-arm -" - -# Used to determine whether to install or uninstall odo -INSTALLER_ACTION="" - -parse_installer_action_flag () -{ - case "$@" in - - # Set INSTALLER_ACTION to uninstall odo or install odo - # Include --uninstall flag when running installer.sh to uninstall and simply run installer.sh to install latest version of odo - --uninstall) - INSTALLER_ACTION="uninstall" - ;; - - *) - INSTALLER_ACTION="install" - ;; - esac -} - -echo_stderr () -{ - echo "$@" >&2 -} - -command_exists() { - distribution=$(get_distribution) - - case "$distribution" in - - ubuntu|debian) - # Use which to verify install/uninstall on ubuntu and debian distributions - which "$@" > /dev/null 
2>&1 - ;; - - *) - command -v "$@" > /dev/null 2>&1 - ;; - esac -} - -check_platform() { - kernel="$(uname -s)" - if [ "$(uname -m)" = "x86_64" ]; then - arch="amd64" - fi - - platform_type=$(echo "${kernel}-${arch}" | tr '[:upper:]' '[:lower:]') - - if ! echo "# $SUPPORTED_PLATFORMS" | grep "$platform_type" > /dev/null; then - echo_stderr " -# The installer has detected your platform to be $platform_type, which is -# currently not supported by this installer script. - -# Please visit the following URL for detailed installation steps: -# https://github.com/openshift/odo/#installation" - exit 1 - fi - echo "$platform_type" -} - -get_distribution() { - lsb_dist="" - if [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$ID")" - fi - echo "$lsb_dist" -} - -set_privileged_execution() { - if [ "$(id -u)" != "0" ]; then - if command_exists sudo; then - echo "# Installer will run privileged commands with sudo" - PRIVILEGED_EXECUTION='sudo -E sh -c' - elif command_exists su ; then - echo "# Installer will run privileged commands with \"su -c\"" - PRIVILEGED_EXECUTION='su -c' - else - echo_stderr "# -This installer needs to run as root. The current user is not root, and we could not find "sudo" or "su" installed on the system. Please run again with root privileges, or install "sudo" or "su" packages. -" - fi - else - echo "# Installer is being run as root" - fi -} - -invalid_odo_version_error() { - echo_stderr "# Invalid value of odo version provided. Provide master or latest." - exit 1 -} - -installer_odo() { - echo "# Detecting distribution..." - - platform="$(check_platform)" - echo "# Detected platform: $platform" - - if [ "$INSTALLER_ACTION" == "install" ] && command_exists odo; then - echo_stderr echo_stderr "# -odo version \"$(odo version --client)\" is already installed on your system. Running this installer script might cause issues with your current installation. 
If you want to install odo using this script, please remove the current installation of odo from you system. -Aborting now! -" - exit 1 - elif [ "$INSTALLER_ACTION" == "uninstall" ] && ! command_exists odo; then - echo_stderr "# odo is not installed on your system. Ending execution of uninstall script." - exit 1 - fi - - # macOS specific steps - if [ $platform = "darwin-amd64" ]; then - if ! command_exists brew; then - echo_stderr "# brew command does not exist. Please install brew and run the installer again." - fi - - if [ "$INSTALLER_ACTION" == "install" ]; then - brew tap kadel/odo - echo "# Installing odo ${ODO_VERSION} on macOS" - case $ODO_VERSION in - master) - brew install kadel/odo/odo -- HEAD - ;; - latest) - brew install kadel/odo/odo - esac - elif [ "$INSTALLER_ACTION" == "uninstall" ]; then - echo "# Uninstalling odo on macOS" - brew uninstall odo - fi - - return 0 - fi - - set_privileged_execution - - distribution=$(get_distribution) - echo "# Detected distribution: $distribution" - - case "$distribution" in - - ubuntu|debian) - if [ "$INSTALLER_ACTION" == "install" ]; then - echo "# Installing odo version: $ODO_VERSION on $distribution" - echo "# Installing pre-requisites..." - $PRIVILEGED_EXECUTION "apt-get update" - $PRIVILEGED_EXECUTION "apt-get install -y gnupg apt-transport-https curl" - - echo "# "Adding GPG public key... - $PRIVILEGED_EXECUTION "curl -L \"$DEBIAN_GPG_PUBLIC_KEY\" | apt-key add -" - - echo "# Adding repository to /etc/apt/sources.list" - case "$ODO_VERSION" in - - master) - $PRIVILEGED_EXECUTION "echo \"deb $DEBIAN_MASTER_REPOSITORY stretch main\" | tee -a /etc/apt/sources.list" - ;; - latest) - $PRIVILEGED_EXECUTION "echo \"deb $DEBIAN_LATEST_REPOSITORY stretch main\" | tee -a /etc/apt/sources.list" - ;; - *) - invalid_odo_version_error - esac - - $PRIVILEGED_EXECUTION "apt-get update" - $PRIVILEGED_EXECUTION "apt-get install -y odo" - elif [ "$INSTALLER_ACTION" == "uninstall" ]; then - echo "# Uninstalling odo..." 
- $PRIVILEGED_EXECUTION "apt-get remove -y odo" - fi - ;; - - centos|fedora) - - package_manager="" - case "$distribution" in - - fedora) - package_manager="dnf" - ;; - centos) - package_manager="yum" - ;; - esac - - if [ "$INSTALLER_ACTION" == "install" ]; then - echo "# Installing odo version $ODO_VERSION on $distribution" - - echo "# Adding odo repo under /etc/yum.repos.d/" - case "$ODO_VERSION" in - - master) - $PRIVILEGED_EXECUTION "curl -L $RPM_MASTER_YUM_REPO -o /etc/yum.repos.d/bintray-odo-odo-rpm-dev.repo" - ;; - latest) - $PRIVILEGED_EXECUTION "curl -L $RPM_LATEST_YUM_REPO -o /etc/yum.repos.d/bintray-odo-odo-rpm-releases.repo" - ;; - *) - invalid_odo_version_error - esac - - $PRIVILEGED_EXECUTION "$package_manager install -y odo" - elif [ "$INSTALLER_ACTION" == "uninstall" ]; then - echo "# Uninstalling odo..." - $PRIVILEGED_EXECUTION "$package_manager remove -y odo" - fi - ;; - - *) - if [ "$INSTALLER_ACTION" == "install" ]; then - echo "# Could not identify distribution. Proceeding with a binary install..." - - BINARY_URL="" - TMP_DIR=$(mktemp -d) - case "$ODO_VERSION" in - - master) - BINARY_URL="$BINTRAY_URL/$platform/odo" - echo "# Downloading odo from $BINARY_URL" - curl -Lo $TMP_DIR/odo "$BINARY_URL" - ;; - - latest) - BINARY_URL="$GITHUB_RELEASES_URL/odo-$platform.tar.gz" - echo "# Downloading odo from $BINARY_URL" - curl -Lo $TMP_DIR/odo.tar.gz "$BINARY_URL" - echo "# Extracting odo.tar.gz" - tar -xvzf $TMP_DIR/odo.tar.gz - ;; - - *) - invalid_odo_version_error - esac - - echo "# Setting execute permissions on odo" - chmod +x $TMP_DIR/odo - echo "# Moving odo binary to $INSTALLATION_PATH" - $PRIVILEGED_EXECUTION "mv $TMP_DIR/odo $INSTALLATION_PATH" - echo "# odo has been successfully installed on your machine" - rm -r $TMP_DIR - - elif [ "$INSTALLER_ACTION" == "uninstall" ]; then - echo "# Proceeding with removing binary..." 
- rm -r $INSTALLATION_PATH/odo - fi - ;; - esac -} - -verify_odo() { - if [ $INSTALLER_ACTION == "install" ] && command_exists odo; then - echo " -# Verification complete! -# odo version \"$(odo version --client)\" has been installed at $(type -P odo) -" - elif [ "$INSTALLER_ACTION" == "uninstall" ] && ! command_exists odo; then - echo " -# Verification complete! -# odo has been uninstalled" - else - echo_stderr " -# Something is wrong with odo installer. Please run the installer script again. If the issue persists, please create an issue at https://github.com/openshift/odo/issues" - exit 1 - fi -} - -parse_installer_action_flag "$@" -installer_odo -verify_odo diff --git a/scripts/oc-cluster.sh b/scripts/oc-cluster.sh index 88b8a24aca9..95b39ba9346 100755 --- a/scripts/oc-cluster.sh +++ b/scripts/oc-cluster.sh @@ -14,6 +14,9 @@ sudo cat /etc/docker/daemon.json sudo service docker start sudo service docker status +# Docker version that oc cluster up uses +docker version + ## download oc binaries sudo wget $OPENSHIFT_CLIENT_BINARY_URL -O /tmp/openshift-origin-client-tools.tar.gz 2> /dev/null > /dev/null diff --git a/scripts/openshiftci-presubmit-integration-tests.sh b/scripts/openshiftci-periodic-tests.sh similarity index 93% rename from scripts/openshiftci-presubmit-integration-tests.sh rename to scripts/openshiftci-periodic-tests.sh index eae5b32b05f..f69180c71c1 100755 --- a/scripts/openshiftci-presubmit-integration-tests.sh +++ b/scripts/openshiftci-periodic-tests.sh @@ -21,4 +21,7 @@ make test-cmd-login-logout make test-cmd-project make test-operator-hub +# E2e tests +make test-e2e-all + odo logout diff --git a/scripts/rpm-prepare.sh b/scripts/rpm-prepare.sh index b4876236fb0..2b8819d5748 100755 --- a/scripts/rpm-prepare.sh +++ b/scripts/rpm-prepare.sh @@ -5,7 +5,7 @@ set +ex echo "Reading ODO_VERSION, ODO_RELEASE and GIT_COMMIT env, if they are set" # Change version as needed. 
In most cases ODO_RELEASE would not be touched unless # we want to do a re-lease of same version as we are not backporting -export ODO_VERSION=${ODO_VERSION:=1.2.1} +export ODO_VERSION=${ODO_VERSION:=1.2.3} export ODO_RELEASE=${ODO_RELEASE:=1} export GIT_COMMIT=${GIT_COMMIT:=`git rev-parse --short HEAD 2>/dev/null`} diff --git a/scripts/setup-operators.sh b/scripts/setup-operators.sh index c0f4e0bad0a..8d05d2c3825 100644 --- a/scripts/setup-operators.sh +++ b/scripts/setup-operators.sh @@ -28,9 +28,9 @@ install_etcd_operator(){ kind: Subscription metadata: name: etcd - namespace: ${OPERATOR_HUB_PROJECT} + namespace: openshift-operators spec: - channel: singlenamespace-alpha + channel: clusterwide-alpha installPlanApproval: Automatic name: etcd source: community-operators @@ -51,25 +51,11 @@ do fi done -# Now onto namespace bound operator -# Create OperatorGroup -oc create -f - <> myfile-init.log" - workdir: /data - - name: devBuild - actions: - - type: exec - component: runtime - command: "echo hello >> myfile.log" - workdir: /data - - name: devRun - actions: - - type: exec - component: runtime2 - command: "cat myfile.log" - workdir: /data \ No newline at end of file + - exec: + id: devbuild + component: runtime + commandLine: "echo hello >> myfile.log" + workingDir: /data + group: + kind: build + isDefault: true + - exec: + id: devrun + component: runtime2 + commandLine: "cat myfile.log" + workingDir: /data + group: + kind: run + isDefault: true \ No newline at end of file diff --git a/tests/examples/source/devfiles/nodejs/devfile-without-devbuild.yaml b/tests/examples/source/devfiles/nodejs/devfile-without-devbuild.yaml index 0c1df6998c5..8f6cc410819 100644 --- a/tests/examples/source/devfiles/nodejs/devfile-without-devbuild.yaml +++ b/tests/examples/source/devfiles/nodejs/devfile-without-devbuild.yaml @@ -1,28 +1,31 @@ -apiVersion: 1.0.0 +schemaVersion: 2.0.0 metadata: name: test-devfile projects: - - - name: nodejs-web-app - source: - type: git + - name: 
nodejs-web-app + git: location: "https://github.com/che-samples/web-nodejs-sample.git" components: - - type: dockerimage - image: quay.io/eclipse/che-nodejs10-ubi:nightly - endpoints: - - name: "3000/tcp" - port: 3000 - alias: runtime - env: - - name: FOO - value: "bar" - memoryLimit: 1024Mi - mountSources: true + - container: + name: runtime + image: quay.io/eclipse/che-nodejs10-ubi:nightly + memoryLimit: 1024Mi + env: + - name: FOO + value: "bar" + endpoints: + - name: "3000/tcp" + configuration: + protocol: tcp + scheme: http + targetPort: 3000 + mountSources: true commands: - - name: devrun - actions: - - type: exec - component: runtime - command: "npm install && nodemon app.js" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app \ No newline at end of file + - exec: + id: devrun + component: runtime + commandLine: "npm install && nodemon app.js" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: run + isDefault: true \ No newline at end of file diff --git a/tests/examples/source/devfiles/nodejs/devfile-without-devinit.yaml b/tests/examples/source/devfiles/nodejs/devfile-without-devinit.yaml index a05b737ae1c..9645b1e751c 100644 --- a/tests/examples/source/devfiles/nodejs/devfile-without-devinit.yaml +++ b/tests/examples/source/devfiles/nodejs/devfile-without-devinit.yaml @@ -1,34 +1,39 @@ -apiVersion: 1.0.0 +schemaVersion: 2.0.0 metadata: name: test-devfile projects: - - - name: nodejs-web-app - source: - type: git + - name: nodejs-web-app + git: location: "https://github.com/che-samples/web-nodejs-sample.git" components: - - type: dockerimage - image: quay.io/eclipse/che-nodejs10-ubi:nightly - endpoints: - - name: "3000/tcp" - port: 3000 - alias: runtime - env: - - name: FOO - value: "bar" - memoryLimit: 1024Mi - mountSources: true + - container: + name: runtime + image: quay.io/eclipse/che-nodejs10-ubi:nightly + memoryLimit: 1024Mi + env: + - name: FOO + value: "bar" + endpoints: + - name: "3000/tcp" + configuration: + protocol: tcp + 
scheme: http + targetPort: 3000 + mountSources: true commands: - - name: devbuild - actions: - - type: exec - component: runtime - command: "npm install" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app - - name: devrun - actions: - - type: exec - component: runtime - command: "nodemon app.js" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app \ No newline at end of file + - exec: + id: devbuild + component: runtime + commandLine: "npm install" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: build + isDefault: true + - exec: + id: devrun + component: runtime + commandLine: "nodemon app.js" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: run + isDefault: true \ No newline at end of file diff --git a/tests/examples/source/devfiles/nodejs/devfile.yaml b/tests/examples/source/devfiles/nodejs/devfile.yaml index 3599d25ac50..70ed8920c2d 100644 --- a/tests/examples/source/devfiles/nodejs/devfile.yaml +++ b/tests/examples/source/devfiles/nodejs/devfile.yaml @@ -1,46 +1,50 @@ -apiVersion: 1.0.0 +schemaVersion: 2.0.0 metadata: name: test-devfile projects: - - - name: nodejs-web-app - source: - type: git + - name: nodejs-web-app + git: location: "https://github.com/che-samples/web-nodejs-sample.git" components: - - type: dockerimage - image: quay.io/eclipse/che-nodejs10-ubi:nightly - endpoints: - - name: "3000/tcp" - port: 3000 - alias: runtime - env: - - name: FOO - value: "bar" - memoryLimit: 1024Mi - mountSources: true + - container: + name: runtime + image: quay.io/eclipse/che-nodejs10-ubi:nightly + memoryLimit: 1024Mi + endpoints: + - name: "3000/tcp" + configuration: + protocol: tcp + scheme: http + targetPort: 3000 + mountSources: true commands: - - name: build - actions: - - type: exec - component: runtime - command: "npm install" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app - - name: devbuild - actions: - - type: exec - component: runtime - command: "npm install" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app 
- - name: run - actions: - - type: exec - component: runtime - command: "nodemon app.js" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app - - name: devrun - actions: - - type: exec - component: runtime - command: "nodemon app.js" - workdir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + - exec: + id: devbuild + component: runtime + commandLine: "npm install" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: build + isDefault: true + - exec: + id: build + component: runtime + commandLine: "npm install" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: build + - exec: + id: devrun + component: runtime + commandLine: "nodemon app.js" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: run + isDefault: true + - exec: + id: run + component: runtime + commandLine: "nodemon app.js" + workingDir: ${CHE_PROJECTS_ROOT}/nodejs-web-app/app + group: + kind: run \ No newline at end of file diff --git a/tests/examples/source/devfiles/springboot/devfile.yaml b/tests/examples/source/devfiles/springboot/devfile.yaml index 185328ebe5d..86b8b0fb5a6 100644 --- a/tests/examples/source/devfiles/springboot/devfile.yaml +++ b/tests/examples/source/devfiles/springboot/devfile.yaml @@ -1,55 +1,52 @@ --- -apiVersion: 1.0.0 +schemaVersion: 2.0.0 metadata: - generateName: java-spring-boot + name: java-spring-boot projects: - - - name: springbootproject - source: - type: git + - name: springbootproject + git: location: "https://github.com/maysunfaisal/springboot.git" components: - - - type: chePlugin - id: redhat/java/latest - memoryLimit: 1512Mi - - - type: dockerimage - image: maysunfaisal/springbootbuild - alias: tools - memoryLimit: 768Mi - command: ['tail'] - args: [ '-f', '/dev/null'] - mountSources: true - volumes: - - name: springbootpvc - containerPath: /data - - - type: dockerimage - image: maysunfaisal/springbootruntime - alias: runtime - memoryLimit: 768Mi - endpoints: - - name: '8080/tcp' - port: 8080 - mountSources: false - 
volumes: - - name: springbootpvc - containerPath: /data + - container: + name: tools + image: maysunfaisal/springbootbuild + memoryLimit: 768Mi + command: ['tail'] + args: [ '-f', '/dev/null'] + volumeMounts: + - name: springbootpvc + path: /data + mountSources: true + - container: + name: runtime + image: maysunfaisal/springbootruntime + memoryLimit: 768Mi + command: ['tail'] + args: [ '-f', '/dev/null'] + endpoints: + - name: "8080/tcp" + configuration: + protocol: tcp + scheme: http + targetPort: 8080 + volumeMounts: + - name: springbootpvc + path: /data + mountSources: false commands: - - - name: devBuild - actions: - - - type: exec - component: tools - command: "/artifacts/bin/build-container-full.sh" - workdir: /projects/springbootproject - - - name: devRun - actions: - - - type: exec - component: runtime - command: "/artifacts/bin/start-server.sh" - workdir: / + - exec: + id: devbuild + component: tools + commandLine: "/artifacts/bin/build-container-full.sh" + workingDir: /projects/springbootproject + group: + kind: build + isDefault: true + - exec: + id: devrun + component: runtime + commandLine: "/artifacts/bin/start-server.sh" + workingDir: / + group: + kind: run + isDefault: true \ No newline at end of file diff --git a/tests/examples/source/devfiles/springboot/devfile-init-without-build.yaml b/tests/examples/source/devfilesV1/springboot/devfile-init-without-build.yaml similarity index 80% rename from tests/examples/source/devfiles/springboot/devfile-init-without-build.yaml rename to tests/examples/source/devfilesV1/springboot/devfile-init-without-build.yaml index a6f032a0415..f8cc3b72a6d 100644 --- a/tests/examples/source/devfiles/springboot/devfile-init-without-build.yaml +++ b/tests/examples/source/devfilesV1/springboot/devfile-init-without-build.yaml @@ -3,18 +3,15 @@ apiVersion: 1.0.0 metadata: generateName: java-spring-boot projects: - - - name: springbootproject + - name: springbootproject source: type: git location: 
"https://github.com/maysunfaisal/springboot.git" components: - - - type: chePlugin + - type: chePlugin id: redhat/java/latest memoryLimit: 1512Mi - - - type: dockerimage + - type: dockerimage image: maysunfaisal/springbootbuild alias: tools memoryLimit: 768Mi @@ -24,8 +21,7 @@ components: volumes: - name: springbootpvc containerPath: /data - - - type: dockerimage + - type: dockerimage image: maysunfaisal/springbootruntime alias: runtime memoryLimit: 768Mi @@ -37,19 +33,15 @@ components: - name: springbootpvc containerPath: /data commands: - - - name: devInit + - name: devInit actions: - - - type: exec + - type: exec component: tools command: "echo hello; touch /data/afile.txt" workdir: /projects/springbootproject - - - name: devRun + - name: devRun actions: - - - type: exec + - type: exec component: runtime command: "/artifacts/bin/start-server.sh" - workdir: / + workdir: / \ No newline at end of file diff --git a/tests/examples/source/devfiles/springboot/devfile-init.yaml b/tests/examples/source/devfilesV1/springboot/devfile-init.yaml similarity index 97% rename from tests/examples/source/devfiles/springboot/devfile-init.yaml rename to tests/examples/source/devfilesV1/springboot/devfile-init.yaml index fd4c494bbb7..cad14c21bd6 100644 --- a/tests/examples/source/devfiles/springboot/devfile-init.yaml +++ b/tests/examples/source/devfilesV1/springboot/devfile-init.yaml @@ -37,10 +37,10 @@ components: - name: springbootpvc containerPath: /data commands: - - + - name: devinit actions: - - + - type: exec component: tools command: "echo hello" @@ -60,4 +60,4 @@ commands: type: exec component: runtime command: "/artifacts/bin/start-server.sh" - workdir: / + workdir: / \ No newline at end of file diff --git a/tests/helper/helper_cli.go b/tests/helper/helper_cli.go index c88e9b3f9bb..bebc9e004b3 100644 --- a/tests/helper/helper_cli.go +++ b/tests/helper/helper_cli.go @@ -12,4 +12,6 @@ type CliRunner interface { GetVolumeMountNamesandPathsFromContainer(deployName string, 
containerName, namespace string) string WaitAndCheckForExistence(resourceType, namespace string, timeoutMinutes int) bool GetServices(namespace string) string + CreateRandNamespaceProject() string + DeleteNamespaceProject(projectName string) } diff --git a/tests/helper/helper_filesystem.go b/tests/helper/helper_filesystem.go index aef7bbd00c2..beae41f903e 100644 --- a/tests/helper/helper_filesystem.go +++ b/tests/helper/helper_filesystem.go @@ -2,7 +2,6 @@ package helper import ( "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -11,6 +10,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/openshift/odo/pkg/util" ) // CreateNewContext create new empty temporary directory @@ -89,7 +89,7 @@ func CopyExampleDevFile(devfilePath, targetDst string) { info, err := os.Stat(src) Expect(err).NotTo(HaveOccurred()) - err = copyFile(src, targetDst, info) + err = util.CopyFile(src, targetDst, info) Expect(err).NotTo(HaveOccurred()) } @@ -107,7 +107,7 @@ func ReplaceString(filename string, oldString string, newString string) { f, err := ioutil.ReadFile(filename) Expect(err).NotTo(HaveOccurred()) - newContent := strings.Replace(string(f), oldString, newString, 1) + newContent := strings.ReplaceAll(string(f), oldString, newString) err = ioutil.WriteFile(filename, []byte(newContent), 0600) Expect(err).NotTo(HaveOccurred()) @@ -137,29 +137,7 @@ func copyDir(src string, dst string, info os.FileInfo) error { return err } - return copyFile(src, dst, info) -} - -// copyFile copy one file to another location -func copyFile(src, dst string, info os.FileInfo) error { - dFile, err := os.Create(dst) - if err != nil { - return err - } - defer dFile.Close() // #nosec G307 - - sFile, err := os.Open(src) - if err != nil { - return err - } - defer sFile.Close() // #nosec G307 - - if err = os.Chmod(dFile.Name(), info.Mode()); err != nil { - return err - } - - _, err = io.Copy(dFile, sFile) - return err + return util.CopyFile(src, dst, info) } // CreateFileWithContent 
creates a file at the given path and writes the given content diff --git a/tests/helper/helper_generic.go b/tests/helper/helper_generic.go index 09fa06ebc62..028865514c0 100644 --- a/tests/helper/helper_generic.go +++ b/tests/helper/helper_generic.go @@ -4,29 +4,21 @@ import ( "bytes" "encoding/json" "fmt" - "math/rand" "os" "os/exec" + "path/filepath" "strings" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" + "github.com/openshift/odo/pkg/util" ) -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - // RandString returns a random string of given length func RandString(n int) string { - const letterBytes = "abcdefghijklmnopqrstuvwxyz" - b := make([]byte, n) - for i := range b { - b[i] = letterBytes[rand.Intn(len(letterBytes))] - } - return string(b) + return util.GenerateRandomString(n) } // WaitForCmdOut runs a command until it gets @@ -46,7 +38,7 @@ func WaitForCmdOut(program string, args []string, timeout int, errOnFail bool, c for { select { case <-pingTimeout: - Fail(fmt.Sprintf("Timeout out after %v minutes", timeout)) + Fail(fmt.Sprintf("Timeout after %v minutes", timeout)) case <-tick: session := CmdRunner(program, args...) 
@@ -145,7 +137,7 @@ func WatchNonRetCmdStdOut(cmdStr string, timeout time.Duration, check func(outpu for { select { case <-timeoutCh: - Fail("Timeout out after " + string(timeout) + " minutes") + Fail(fmt.Sprintf("Timeout after %.2f minutes", timeout.Minutes())) case <-ticker.C: if !startedFileModification && startIndicatorFunc(buf.String()) { startedFileModification = true @@ -167,3 +159,21 @@ func GetUserHomeDir() string { Expect(err).NotTo(HaveOccurred()) return homeDir } + +// LocalKubeconfigSet sets the KUBECONFIG to the temporary config file +func LocalKubeconfigSet(context string) { + originalKubeCfg := os.Getenv("KUBECONFIG") + if originalKubeCfg == "" { + homeDir := GetUserHomeDir() + originalKubeCfg = filepath.Join(homeDir, ".kube", "config") + } + copyKubeConfigFile(originalKubeCfg, filepath.Join(context, "config")) +} + +// GetCliRunner gets the running cli against Kubernetes or OpenShift +func GetCliRunner() CliRunner { + if os.Getenv("KUBERNETES") == "true" { + return NewKubectlRunner("kubectl") + } + return NewOcRunner("oc") +} diff --git a/tests/helper/helper_kubectl.go b/tests/helper/helper_kubectl.go index 782fabba4eb..d84c7147eb3 100644 --- a/tests/helper/helper_kubectl.go +++ b/tests/helper/helper_kubectl.go @@ -82,7 +82,7 @@ func (kubectl KubectlRunner) WaitAndCheckForExistence(resourceType, namespace st for { select { case <-pingTimeout: - Fail(fmt.Sprintf("Timeout out after %v minutes", timeoutMinutes)) + Fail(fmt.Sprintf("Timeout after %d minutes", timeoutMinutes)) case <-tick: session := CmdRunner(kubectl.path, "get", resourceType, "--namespace", namespace) @@ -104,3 +104,20 @@ func (kubectl KubectlRunner) GetServices(namespace string) string { output := string(session.Wait().Out.Contents()) return output } + +// CreateRandNamespaceProject create new project with random name in kubernetes cluster (10 letters) +func (kubectl KubectlRunner) CreateRandNamespaceProject() string { + projectName := RandString(10) + fmt.Fprintf(GinkgoWriter, 
"Creating a new project: %s\n", projectName) + CmdShouldPass("kubectl", "create", "namespace", projectName) + CmdShouldPass("kubectl", "config", "set-context", "--current", "--namespace", projectName) + session := CmdShouldPass("kubectl", "get", "namespaces") + Expect(session).To(ContainSubstring(projectName)) + return projectName +} + +// DeleteNamespaceProject deletes a specified project in kubernetes cluster +func (kubectl KubectlRunner) DeleteNamespaceProject(projectName string) { + fmt.Fprintf(GinkgoWriter, "Deleting project: %s\n", projectName) + CmdShouldPass("kubectl", "delete", "namespaces", projectName) +} diff --git a/tests/helper/helper_oc.go b/tests/helper/helper_oc.go index 829211638d0..a8f877869a5 100644 --- a/tests/helper/helper_oc.go +++ b/tests/helper/helper_oc.go @@ -50,6 +50,20 @@ func (oc OcRunner) GetCurrentProject() string { return "" } +// GetCurrentServerURL retrieves the URL of the server we're currently connected to +// returns empty if not connected or an error occurred +func (oc OcRunner) GetCurrentServerURL() string { + session := CmdRunner(oc.path, "project") + session.Wait() + if session.ExitCode() == 0 { + output := strings.TrimSpace(string(session.Out.Contents())) + // format is: Using project "" on server "". 
+ a := strings.Split(output, "\"") + return a[len(a)-2] // last entry is ".", we need the one before that + } + return "" +} + // GetFirstURL returns the url of the first Route that it can find for given component func (oc OcRunner) GetFirstURL(component string, app string, project string) string { session := CmdRunner(oc.path, "get", "route", @@ -264,8 +278,8 @@ func (oc OcRunner) SourceLocationBC(componentName string, appName string, projec // checkForImageStream checks if there is a ImageStram with name and tag in openshift namespace func (oc OcRunner) checkForImageStream(name string, tag string) bool { // first check if there is ImageStream with given name - names := CmdShouldPass(oc.path, "get", "is", "-n", "openshift", - "-o", "jsonpath='{range .items[*]}{.metadata.name}{\"\\n\"}{end}'") + names := strings.Trim(CmdShouldPass(oc.path, "get", "is", "-n", "openshift", + "-o", "jsonpath='{range .items[*]}{.metadata.name}{\"\\n\"}{end}'"), "'") scanner := bufio.NewScanner(strings.NewReader(names)) namePresent := false for scanner.Scan() { @@ -276,8 +290,8 @@ func (oc OcRunner) checkForImageStream(name string, tag string) bool { tagPresent := false // if there is a ImageStream check if there is a given tag if namePresent { - tags := CmdShouldPass(oc.path, "get", "is", name, "-n", "openshift", - "-o", "jsonpath='{range .spec.tags[*]}{.name}{\"\\n\"}{end}'") + tags := strings.Trim(CmdShouldPass(oc.path, "get", "is", name, "-n", "openshift", + "-o", "jsonpath='{range .spec.tags[*]}{.name}{\"\\n\"}{end}'"), "'") scanner := bufio.NewScanner(strings.NewReader(tags)) for scanner.Scan() { if scanner.Text() == tag { @@ -475,7 +489,7 @@ func (oc OcRunner) WaitAndCheckForExistence(resourceType, namespace string, time for { select { case <-pingTimeout: - Fail(fmt.Sprintf("Timeout out after %v minutes", timeoutMinutes)) + Fail(fmt.Sprintf("Timeout after %d minutes", timeoutMinutes)) case <-tick: session := CmdRunner(oc.path, "get", resourceType, "--namespace", namespace) @@ 
-505,3 +519,20 @@ func (oc OcRunner) VerifyResourceDeleted(resourceType, resourceName, namespace s output := string(session.Wait().Out.Contents()) Expect(output).NotTo(ContainSubstring(resourceName)) } + +// CreateRandNamespaceProject create new project with random name in oc cluster (10 letters) +func (oc OcRunner) CreateRandNamespaceProject() string { + projectName := RandString(10) + fmt.Fprintf(GinkgoWriter, "Creating a new project: %s\n", projectName) + session := CmdShouldPass("odo", "project", "create", projectName, "-w", "-v4") + Expect(session).To(ContainSubstring("New project created")) + Expect(session).To(ContainSubstring(projectName)) + return projectName +} + +// DeleteNamespaceProject deletes a specified project in oc cluster +func (oc OcRunner) DeleteNamespaceProject(projectName string) { + fmt.Fprintf(GinkgoWriter, "Deleting project: %s\n", projectName) + session := CmdShouldPass("odo", "project", "delete", projectName, "-f") + Expect(session).To(ContainSubstring("Deleted project : " + projectName)) +} diff --git a/tests/helper/kubernetes_utils.go b/tests/helper/kubernetes_utils.go index 5900ec4f150..34ef355fe8d 100644 --- a/tests/helper/kubernetes_utils.go +++ b/tests/helper/kubernetes_utils.go @@ -6,31 +6,15 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "github.com/openshift/odo/pkg/util" ) -// CopyKubeConfigFile copies default kubeconfig file into current context config file -func CopyKubeConfigFile(kubeConfigFile, tempConfigFile string) string { +// copyKubeConfigFile copies default kubeconfig file into current temporary context config file +func copyKubeConfigFile(kubeConfigFile, tempConfigFile string) { info, err := os.Stat(kubeConfigFile) Expect(err).NotTo(HaveOccurred()) - err = copyFile(kubeConfigFile, tempConfigFile, info) + err = util.CopyFile(kubeConfigFile, tempConfigFile, info) Expect(err).NotTo(HaveOccurred()) os.Setenv("KUBECONFIG", tempConfigFile) - return tempConfigFile -} - -// CreateRandNamespace create new project with random name in kubernetes cluster (10 letters) -func CreateRandNamespace(context string) string { - projectName := RandString(10) - fmt.Fprintf(GinkgoWriter, "Creating a new project: %s\n", projectName) - CmdShouldPass("kubectl", "create", "namespace", projectName) - CmdShouldPass("kubectl", "config", "set-context", context, "--namespace", projectName) - session := CmdShouldPass("kubectl", "get", "namespaces") - Expect(session).To(ContainSubstring(projectName)) - return projectName -} - -// DeleteNamespace deletes a specified project in kubernetes cluster -func DeleteNamespace(projectName string) { - fmt.Fprintf(GinkgoWriter, "Deleting project: %s\n", projectName) - CmdShouldPass("kubectl", "delete", "namespaces", projectName) + fmt.Fprintf(GinkgoWriter, "Setting KUBECONFIG=%s\n", tempConfigFile) } diff --git a/tests/integration/cmd_app_test.go b/tests/integration/cmd_app_test.go index 75f010b366f..9e39a81734f 100644 --- a/tests/integration/cmd_app_test.go +++ b/tests/integration/cmd_app_test.go @@ -56,8 +56,8 @@ var _ = Describe("odo app command tests", func() { appDelete := helper.CmdShouldFail("odo", "app", "delete", "test", "--project", project, "-f") Expect(appDelete).To(ContainSubstring("test app does not exists")) - appDescribe := 
helper.CmdShouldPass("odo", "app", "describe", "test", "--project", project) - Expect(appDescribe).To(ContainSubstring("Application test has no components or services deployed.")) + appDescribe := helper.CmdShouldFail("odo", "app", "describe", "test", "--project", project) + Expect(appDescribe).To(ContainSubstring("test app does not exists")) }) }) @@ -82,8 +82,8 @@ var _ = Describe("odo app command tests", func() { Expect(desiredCompListJSON).Should(MatchJSON(actualCompListJSON)) helper.CmdShouldPass("odo", "app", "describe") - desiredDesAppJSON := fmt.Sprintf(`{"kind":"Application","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"myapp","namespace":"%s","creationTimestamp":null},"spec":{}}`, project) - actualDesAppJSON := helper.CmdShouldPass("odo", "app", "describe", "myapp", "-o", "json") + desiredDesAppJSON := fmt.Sprintf(`{"kind":"Application","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"app","namespace":"%s","creationTimestamp":null},"spec":{"components": ["nodejs"]}}`, project) + actualDesAppJSON := helper.CmdShouldPass("odo", "app", "describe", "app", "-o", "json") Expect(desiredDesAppJSON).Should(MatchJSON(actualDesAppJSON)) helper.CmdShouldPass("odo", "app", "delete", "-f") diff --git a/tests/integration/cmd_debug_test.go b/tests/integration/cmd_debug_test.go index ef3b6151b8a..99f2d8fe051 100644 --- a/tests/integration/cmd_debug_test.go +++ b/tests/integration/cmd_debug_test.go @@ -55,7 +55,7 @@ var _ = Describe("odo debug command tests", func() { // Make sure that the debug information output, outputs correctly. // We do *not* check the json output since the debugProcessID will be different each time. 
- helper.WaitForCmdOut("odo", []string{"debug", "info", "--context", context, "-o", "json"}, 1, true, func(output string) bool { + helper.WaitForCmdOut("odo", []string{"debug", "info", "--context", context, "-o", "json"}, 1, false, func(output string) bool { if strings.Contains(output, `"kind": "OdoDebugInfo"`) && strings.Contains(output, `"localPort": `+freePort) { return true diff --git a/tests/integration/cmd_pref_config_test.go b/tests/integration/cmd_pref_config_test.go index a5250443d66..595f0e73a67 100644 --- a/tests/integration/cmd_pref_config_test.go +++ b/tests/integration/cmd_pref_config_test.go @@ -65,10 +65,7 @@ var _ = Describe("odo preference and config command tests", func() { }) It("should get the default global config keys", func() { configOutput := helper.CmdShouldPass("odo", "preference", "view") - Expect(configOutput).To(ContainSubstring("UpdateNotification")) - Expect(configOutput).To(ContainSubstring("NamePrefix")) - Expect(configOutput).To(ContainSubstring("Timeout")) - Expect(configOutput).To(ContainSubstring("PushTarget")) + helper.MatchAllInOutput(configOutput, []string{"UpdateNotification", "NamePrefix", "Timeout", "PushTarget"}) updateNotificationValue := helper.GetPreferenceValue("UpdateNotification") Expect(updateNotificationValue).To(BeEmpty()) namePrefixValue := helper.GetPreferenceValue("NamePrefix") @@ -289,8 +286,7 @@ var _ = Describe("odo preference and config command tests", func() { helper.CmdShouldPass("odo", "config", "unset", "--env", "PORT", "--context", context) helper.CmdShouldPass("odo", "config", "unset", "--env", "SECRET_KEY", "--context", context) configValue := helper.CmdShouldPass("odo", "config", "view", "--context", context) - Expect(configValue).To(Not(ContainSubstring(("PORT")))) - Expect(configValue).To(Not(ContainSubstring(("SECRET_KEY")))) + helper.DontMatchAllInOutput(configValue, []string{"PORT", "SECRET_KEY"}) }) It("should check for existence of environment variable in config before unsetting it", func() 
{ helper.CmdShouldPass("odo", "create", "nodejs", "--project", project, "--context", context) @@ -322,8 +318,7 @@ var _ = Describe("odo preference and config command tests", func() { kubeconfigOld := os.Getenv("KUBECONFIG") os.Setenv("KUBECONFIG", "/no/such/path") configValue := helper.CmdShouldPass("odo", "config", "view", "--context", context) - Expect(configValue).To(ContainSubstring("hello")) - Expect(configValue).To(ContainSubstring("world")) + helper.MatchAllInOutput(configValue, []string{"hello", "world"}) os.Setenv("KUBECONFIG", kubeconfigOld) }) diff --git a/tests/integration/cmd_push_test.go b/tests/integration/cmd_push_test.go index 25892af3f1b..ef1447579c6 100644 --- a/tests/integration/cmd_push_test.go +++ b/tests/integration/cmd_push_test.go @@ -150,8 +150,7 @@ var _ = Describe("odo push command tests", func() { dir := envs["ODO_S2I_DEPLOYMENT_DIR"] stdOut := oc.ExecListDir(podName, project, dir) - Expect(stdOut).To(ContainSubstring(("foobar.txt"))) - Expect(stdOut).To(ContainSubstring(("testdir"))) + helper.MatchAllInOutput(stdOut, []string{"foobar.txt", "testdir"}) // Now we delete the file and dir and push helper.DeleteDir(newFilePath) @@ -160,8 +159,7 @@ var _ = Describe("odo push command tests", func() { // Then check to see if it's truly been deleted stdOut = oc.ExecListDir(podName, project, dir) - Expect(stdOut).To(Not(ContainSubstring(("foobar.txt")))) - Expect(stdOut).To(Not(ContainSubstring(("testdir")))) + helper.DontMatchAllInOutput(stdOut, []string{"foobar.txt", "testdir"}) }) It("should build when a new file and a new folder is added in the directory", func() { diff --git a/tests/integration/cmd_url_test.go b/tests/integration/cmd_url_test.go index d72879f92f3..905414e39c2 100644 --- a/tests/integration/cmd_url_test.go +++ b/tests/integration/cmd_url_test.go @@ -78,13 +78,17 @@ var _ = Describe("odo url command tests", func() { helper.CmdShouldPass("odo", "create", "nodejs", "--context", context, "--project", project, componentName) 
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "8080", "--context", context, "--secure") + + stdout := helper.CmdShouldPass("odo", "url", "list", "--context", context) + helper.MatchAllInOutput(stdout, []string{url1, "Not Pushed", "true"}) + helper.CmdShouldPass("odo", "push", "--context", context) secureURL := helper.DetermineRouteURL(context) Expect(secureURL).To(ContainSubstring("https:")) helper.HttpWaitFor(secureURL, "Hello world from node.js!", 20, 1) - stdout := helper.CmdShouldPass("odo", "url", "list", "--context", context) + stdout = helper.CmdShouldPass("odo", "url", "list", "--context", context) helper.MatchAllInOutput(stdout, []string{secureURL, "Pushed", "true"}) helper.CmdShouldPass("odo", "delete", "-f", "--context", context) @@ -111,6 +115,25 @@ var _ = Describe("odo url command tests", func() { stdout = helper.CmdShouldPass("odo", "url", "describe", url1, "--context", context) helper.MatchAllInOutput(stdout, []string{url1, "Locally Deleted", url1, "odo push"}) }) + + It("should be able to describe a url in CLI format and machine readable json format for a secure url", func() { + helper.CmdShouldPass("odo", "create", "nodejs", "nodejs", "--app", "myapp", "--project", project, "--git", "https://github.com/openshift/nodejs-ex", "--context", context) + helper.CmdShouldPass("odo", "url", "create", "myurl", "--secure", "--context", context) + + actualURLDescribeJSON := helper.CmdShouldPass("odo", "url", "describe", "myurl", "-o", "json", "--context", context) + desiredURLDescribeJSON := fmt.Sprintf(`{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{ "name": "myurl","creationTimestamp": null},"spec":{"port": 8080,"secure": true,"kind": "route"},"status": {"state": "Not Pushed"}}`) + Expect(desiredURLDescribeJSON).Should(MatchJSON(actualURLDescribeJSON)) + + helper.CmdShouldPass("odo", "push", "--context", context) + + // odo url describe -o json + actualURLDescribeJSON = helper.CmdShouldPass("odo", "url", "describe", "myurl", "-o", 
"json", "--context", context) + // get the route URL + fullURLPath := helper.DetermineRouteURL(context) + pathNoHTTP := strings.Split(fullURLPath, "//")[1] + desiredURLDescribeJSON = fmt.Sprintf(`{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{ "name": "myurl","creationTimestamp": null},"spec":{"host":"%s","protocol": "https","port": 8080,"secure": true,"kind": "route"},"status": {"state": "Pushed"}}`, pathNoHTTP) + Expect(desiredURLDescribeJSON).Should(MatchJSON(actualURLDescribeJSON)) + }) }) Context("when listing urls using -o json flag", func() { @@ -131,20 +154,24 @@ var _ = Describe("odo url command tests", func() { actualURLListJSON := helper.CmdShouldPass("odo", "url", "list", "-o", "json") fullURLPath := helper.DetermineRouteURL("") pathNoHTTP := strings.Split(fullURLPath, "//")[1] - desiredURLListJSON := fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"myurl","creationTimestamp":null},"spec":{"host":"%s","protocol":"http","port":8080,"secure":false},"status":{"state": "Pushed"}}]}`, pathNoHTTP) + desiredURLListJSON := fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"myurl","creationTimestamp":null},"spec":{"host":"%s","protocol":"http","port":8080,"secure":false,"kind": "route"},"status":{"state": "Pushed"}}]}`, pathNoHTTP) Expect(desiredURLListJSON).Should(MatchJSON(actualURLListJSON)) }) It("should be able to list url in machine readable json format for a secure url", func() { helper.CmdShouldPass("odo", "create", "nodejs", "nodejs", "--app", "myapp", "--project", project, "--git", "https://github.com/openshift/nodejs-ex") helper.CmdShouldPass("odo", "url", "create", "myurl", "--secure") + actualURLListJSON := helper.CmdShouldPass("odo", "url", "list", "-o", "json") + desiredURLListJSON := 
fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"myurl","creationTimestamp":null},"spec":{"port":8080,"secure":true,"kind": "route"},"status":{"state": "Not Pushed"}}]}`) + Expect(desiredURLListJSON).Should(MatchJSON(actualURLListJSON)) + helper.CmdShouldPass("odo", "push") // odo url list -o json - actualURLListJSON := helper.CmdShouldPass("odo", "url", "list", "-o", "json") + actualURLListJSON = helper.CmdShouldPass("odo", "url", "list", "-o", "json") fullURLPath := helper.DetermineRouteURL("") pathNoHTTP := strings.Split(fullURLPath, "//")[1] - desiredURLListJSON := fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"myurl","creationTimestamp":null},"spec":{"host":"%s","protocol":"https","port":8080,"secure":true},"status":{"state": "Pushed"}}]}`, pathNoHTTP) + desiredURLListJSON = fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"myurl","creationTimestamp":null},"spec":{"host":"%s","protocol":"https","port":8080,"secure":true,"kind": "route"},"status":{"state": "Pushed"}}]}`, pathNoHTTP) Expect(desiredURLListJSON).Should(MatchJSON(actualURLListJSON)) }) }) diff --git a/tests/integration/cmd_watch_test.go b/tests/integration/cmd_watch_test.go index dcdd3c47cfd..b7f991c7d25 100644 --- a/tests/integration/cmd_watch_test.go +++ b/tests/integration/cmd_watch_test.go @@ -14,6 +14,7 @@ import ( var _ = Describe("odo watch command tests", func() { var project string var context string + var currentWorkingDirectory string // Setup up state for each test spec // create new project (not set as active) and new context directory for each test spec @@ -50,6 +51,22 @@ var _ = Describe("odo watch command tests", func() { }) }) + Context("when executing odo watch against an app that 
doesn't exist", func() { + JustBeforeEach(func() { + currentWorkingDirectory = helper.Getwd() + helper.Chdir(context) + }) + JustAfterEach(func() { + helper.Chdir(currentWorkingDirectory) + }) + It("should fail with proper error", func() { + helper.CopyExample(filepath.Join("source", "nodejs"), context) + helper.CmdShouldPass("odo", "component", "create", "nodejs", "--project", project) + output := helper.CmdShouldFail("odo", "watch", "--app", "dummy") + Expect(output).To(ContainSubstring("component does not exist")) + }) + }) + Context("when executing watch on a git source type component", func() { It("should fail", func() { helper.CmdShouldPass("odo", "create", "--context", context, "nodejs", "--git", "https://github.com/openshift/nodejs-ex.git") diff --git a/tests/integration/component.go b/tests/integration/component.go index cd459d5241f..acac04eda63 100644 --- a/tests/integration/component.go +++ b/tests/integration/component.go @@ -210,8 +210,7 @@ func componentTests(args ...string) { helper.CmdShouldPass("odo", append(args, "create", "nodejs", "cmp-git", "--project", project, "--git", "https://github.com/openshift/nodejs-ex", "--min-memory", "100Mi", "--max-memory", "300Mi", "--min-cpu", "0.1", "--max-cpu", "2", "--context", context, "--app", "testing")...) helper.ValidateLocalCmpExist(context, "Type,nodejs", "Name,cmp-git", "Application,testing", "MinCPU,100m") cmpList := helper.CmdShouldPass("odo", append(args, "list", "--context", context)...) - Expect(cmpList).To(ContainSubstring("cmp-git")) - Expect(cmpList).To(ContainSubstring("Not Pushed")) + helper.MatchAllInOutput(cmpList, []string{"cmp-git", "Not Pushed"}) helper.CmdShouldPass("odo", append(args, "delete", "-f", "--all", "--context", context)...) 
}) It("should list the state as unknown for disconnected cluster", func() { @@ -220,8 +219,7 @@ func componentTests(args ...string) { kubeconfigOrig := os.Getenv("KUBECONFIG") os.Setenv("KUBECONFIG", "/no/such/path") cmpList := helper.CmdShouldPass("odo", append(args, "list", "--context", context, "--v", "9")...) - Expect(cmpList).To(ContainSubstring("cmp-git")) - Expect(cmpList).To(ContainSubstring("Unknown")) + helper.MatchAllInOutput(cmpList, []string{"cmp-git", "Unknown"}) // KUBECONFIG defaults to ~/.kube/config so it can be empty in some cases. if kubeconfigOrig != "" { os.Setenv("KUBECONFIG", kubeconfigOrig) @@ -239,17 +237,18 @@ func componentTests(args ...string) { helper.CmdShouldPass("odo", "storage", "create", "storage-1", "--size", "1Gi", "--path", "/data1", "--context", context) helper.ValidateLocalCmpExist(context, "Type,nodejs", "Name,cmp-git", "Application,testing", "URL,0,Name,url-1") cmpDescribe := helper.CmdShouldPass("odo", append(args, "describe", "--context", context)...) 
- - Expect(cmpDescribe).To(ContainSubstring("cmp-git")) - Expect(cmpDescribe).To(ContainSubstring("nodejs")) - Expect(cmpDescribe).To(ContainSubstring("url-1")) - Expect(cmpDescribe).To(ContainSubstring("url-2")) - Expect(cmpDescribe).To(ContainSubstring("https://github.com/openshift/nodejs-ex")) - Expect(cmpDescribe).To(ContainSubstring("storage-1")) + helper.MatchAllInOutput(cmpDescribe, []string{ + "cmp-git", + "nodejs", + "url-1", + "url-2", + "https://github.com/openshift/nodejs-ex", + "storage-1", + }) cmpDescribeJSON, err := helper.Unindented(helper.CmdShouldPass("odo", append(args, "describe", "-o", "json", "--context", context)...)) Expect(err).Should(BeNil()) - expected, err := helper.Unindented(`{"kind": "Component","apiVersion": "odo.dev/v1alpha1","metadata": {"name": "cmp-git","namespace": "` + project + `","creationTimestamp": null},"spec":{"app": "testing","type":"nodejs","source": "https://github.com/openshift/nodejs-ex","sourceType": "git","urls": {"kind": "List", "apiVersion": "odo.dev/v1alpha1", "metadata": {}, "items": [{"kind": "url", "apiVersion": "odo.dev/v1alpha1", "metadata": {"name": "url-1", "creationTimestamp": null}, "spec": {"port": 8080, "secure": false}, "status": {"state": "Not Pushed"}}, {"kind": "url", "apiVersion": "odo.dev/v1alpha1", "metadata": {"name": "url-2", "creationTimestamp": null}, "spec": {"port": 8080, "secure": false}, "status": {"state": "Not Pushed"}}]},"storages": {"kind": "List", "apiVersion": "odo.dev/v1alpha1", "metadata": {}, "items": [{"kind": "storage", "apiVersion": "odo.dev/v1alpha1", "metadata": {"name": "storage-1", "creationTimestamp": null}, "spec": {"size": "1Gi", "path": "/data1"}}]},"ports": ["8080/TCP", "8080/TCP"]},"status": {"state": "Not Pushed"}}`) + expected, err := helper.Unindented(`{"kind": "Component","apiVersion": "odo.dev/v1alpha1","metadata": {"name": "cmp-git","namespace": "` + project + `","creationTimestamp": null},"spec":{"app": "testing","type":"nodejs","source": 
"https://github.com/openshift/nodejs-ex","sourceType": "git","urls": {"kind": "List", "apiVersion": "odo.dev/v1alpha1", "metadata": {}, "items": [{"kind": "url", "apiVersion": "odo.dev/v1alpha1", "metadata": {"name": "url-1", "creationTimestamp": null}, "spec": {"port": 8080, "secure": false, "kind": "route"}, "status": {"state": "Not Pushed"}}, {"kind": "url", "apiVersion": "odo.dev/v1alpha1", "metadata": {"name": "url-2", "creationTimestamp": null}, "spec": {"port": 8080, "secure": false,"kind": "route"}, "status": {"state": "Not Pushed"}}]},"storages": {"kind": "List", "apiVersion": "odo.dev/v1alpha1", "metadata": {}, "items": [{"kind": "storage", "apiVersion": "odo.dev/v1alpha1", "metadata": {"name": "storage-1", "creationTimestamp": null}, "spec": {"size": "1Gi", "path": "/data1"}}]},"ports": ["8080/TCP"]},"status": {"state": "Not Pushed"}}`) Expect(err).Should(BeNil()) Expect(cmpDescribeJSON).To(Equal(expected)) @@ -290,11 +289,7 @@ func componentTests(args ...string) { helper.CmdShouldPass("odo", append(args, "create", "nodejs", "cmp-git-2", "--project", project, "--git", "https://github.com/openshift/nodejs-ex", "--context", context2, "--app", "testing")...) helper.ValidateLocalCmpExist(context2, "Type,nodejs", "Name,cmp-git-2", "Application,testing") cmpList := helper.CmdShouldPass("odo", append(args, "list", "--context", context2)...) - - Expect(cmpList).To(ContainSubstring("cmp-git")) - Expect(cmpList).To(ContainSubstring("cmp-git-2")) - Expect(cmpList).To(ContainSubstring("Not Pushed")) - Expect(cmpList).To(ContainSubstring("Pushed")) + helper.MatchAllInOutput(cmpList, []string{"cmp-git", "cmp-git-2", "Not Pushed", "Pushed"}) helper.CmdShouldPass("odo", append(args, "delete", "-f", "--all", "--context", context)...) helper.CmdShouldPass("odo", append(args, "delete", "-f", "--all", "--context", context2)...) 
@@ -305,8 +300,8 @@ func componentTests(args ...string) { // Since components catalog is constantly changing, we simply check to see if this command passes.. rather than checking the JSON each time. output := helper.CmdShouldPass("odo", "catalog", "list", "components", "-o", "json") - Expect(output).To(ContainSubstring("List")) - Expect(output).To(ContainSubstring("supportedTags")) + helper.MatchAllInOutput(output, []string{"List", "supportedTags"}) + Expect(output).ToNot(ContainSubstring("devfileItems")) }) It("binary component should not fail when --context is not set", func() { @@ -668,9 +663,7 @@ func componentTests(args ...string) { cmpListOutput := helper.CmdShouldPass("odo", append(args, "list")...) Expect(cmpListOutput).To(ContainSubstring(cmpName)) cmpDescribe := helper.CmdShouldPass("odo", append(args, "describe")...) - - Expect(cmpDescribe).To(ContainSubstring(cmpName)) - Expect(cmpDescribe).To(ContainSubstring("nodejs")) + helper.MatchAllInOutput(cmpDescribe, []string{cmpName, "nodejs"}) url := helper.DetermineRouteURL(context) Expect(cmpDescribe).To(ContainSubstring(url)) diff --git a/tests/integration/debug/cmd_debug_test.go b/tests/integration/debug/cmd_debug_test.go index 515ba65ed7e..3d350e0414f 100644 --- a/tests/integration/debug/cmd_debug_test.go +++ b/tests/integration/debug/cmd_debug_test.go @@ -18,9 +18,13 @@ import ( // we execute these tests serially var _ = Describe("odo debug command serial tests", func() { - var project string var context string + var namespace string + + // current directory and project (before any test is run) so it can be restored after all testing is done + var originalDir string + + // Setup up state for each test spec // create new project (not set as active) and new context directory for each test spec // This is before every spec (It) @@ -28,21 +32,23 @@ var _ = Describe("odo debug command serial tests", func() { SetDefaultEventuallyTimeout(10 * time.Minute) SetDefaultConsistentlyDuration(30 * time.Second) context = 
helper.CreateNewContext() - project = helper.CreateRandProject() + namespace = helper.CreateRandProject() os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) + originalDir = helper.Getwd() }) // Clean up after the test // This is run after every Spec (It) AfterEach(func() { - helper.DeleteProject(project) + helper.Chdir(originalDir) + helper.DeleteProject(namespace) helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") }) It("should auto-select a local debug port when the given local port is occupied", func() { helper.CopyExample(filepath.Join("source", "nodejs"), context) - helper.CmdShouldPass("odo", "component", "create", "nodejs:latest", "nodejs-cmp-"+project, "--project", project, "--context", context) + helper.CmdShouldPass("odo", "component", "create", "nodejs:latest", "nodejs-cmp-"+namespace, "--project", namespace, "--context", context) helper.CmdShouldPass("odo", "push", "--context", context) stopChannel := make(chan bool) @@ -66,7 +72,7 @@ var _ = Describe("odo debug command serial tests", func() { } freePort := "" - helper.WaitForCmdOut("odo", []string{"debug", "info", "--context", context}, 1, true, func(output string) bool { + helper.WaitForCmdOut("odo", []string{"debug", "info", "--context", context}, 1, false, func(output string) bool { if strings.Contains(output, "Debug is running") { splits := strings.SplitN(output, ":", 2) Expect(len(splits)).To(Equal(2)) diff --git a/tests/integration/devfile/cmd_devfile_catalog_test.go b/tests/integration/devfile/cmd_devfile_catalog_test.go index d2aa5f7a85a..68d90ccdca4 100644 --- a/tests/integration/devfile/cmd_devfile_catalog_test.go +++ b/tests/integration/devfile/cmd_devfile_catalog_test.go @@ -11,9 +11,10 @@ import ( ) var _ = Describe("odo devfile catalog command tests", func() { - var project string - var context string - var currentWorkingDirectory string + var project, context, currentWorkingDirectory, originalKubeconfig string + + // Using program commmand according to cliRunner in 
devfile + cliRunner := helper.GetCliRunner() // This is run after every Spec (It) var _ = BeforeEach(func() { @@ -21,26 +22,20 @@ var _ = Describe("odo devfile catalog command tests", func() { context = helper.CreateNewContext() os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") - if os.Getenv("KUBERNETES") == "true" { - homeDir := helper.GetUserHomeDir() - kubeConfigFile := helper.CopyKubeConfigFile(filepath.Join(homeDir, ".kube", "config"), filepath.Join(context, "config")) - project = helper.CreateRandNamespace(kubeConfigFile) - } else { - project = helper.CreateRandProject() - } + + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + project = cliRunner.CreateRandNamespaceProject() currentWorkingDirectory = helper.Getwd() helper.Chdir(context) }) // This is run after every Spec (It) var _ = AfterEach(func() { - if os.Getenv("KUBERNETES") == "true" { - helper.DeleteNamespace(project) - os.Unsetenv("KUBECONFIG") - } else { - helper.DeleteProject(project) - } + cliRunner.DeleteNamespaceProject(project) helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") }) @@ -52,7 +47,8 @@ var _ = Describe("odo devfile catalog command tests", func() { "Odo Devfile Components", "NAME", "java-spring-boot", - "openLiberty", + "java-openliberty", + "quarkus", "DESCRIPTION", "REGISTRY", "SUPPORTED", @@ -69,6 +65,7 @@ var _ = Describe("odo devfile catalog command tests", func() { "NAME", "java-spring-boot", "java-maven", + "quarkus", "php-mysql", "DESCRIPTION", "REGISTRY", @@ -77,4 +74,35 @@ var _ = Describe("odo devfile catalog command tests", func() { helper.MatchAllInOutput(output, wantOutput) }) }) + + Context("When executing catalog list components with -o json flag", func() { + It("should list devfile components in json format", 
func() { + output := helper.CmdShouldPass("odo", "catalog", "list", "components", "-o", "json") + wantOutput := []string{ + "odo.dev/v1alpha1", + "openLiberty", + "java-spring-boot", + "nodejs", + "quarkus", + "php-mysql", + "maven", + "golang", + "java-maven", + } + helper.MatchAllInOutput(output, wantOutput) + }) + }) + + Context("When executing catalog list components with registry that is not set up properly", func() { + It("should list components from valid registry", func() { + helper.CmdShouldPass("odo", "registry", "add", "fake", "http://fake") + output := helper.CmdShouldPass("odo", "catalog", "list", "components") + helper.MatchAllInOutput(output, []string{ + "Odo Devfile Components", + "java-spring-boot", + "quarkus", + }) + helper.CmdShouldPass("odo", "registry", "delete", "fake", "-f") + }) + }) }) diff --git a/tests/integration/devfile/cmd_devfile_create_test.go b/tests/integration/devfile/cmd_devfile_create_test.go index a3e749a7f0f..eb3be0d2ef3 100644 --- a/tests/integration/devfile/cmd_devfile_create_test.go +++ b/tests/integration/devfile/cmd_devfile_create_test.go @@ -15,32 +15,36 @@ import ( var _ = Describe("odo devfile create command tests", func() { const devfile = "devfile.yaml" const envFile = ".odo/env/env.yaml" - var namespace string - var context string - var currentWorkingDirectory string + var namespace, context, currentWorkingDirectory, devfilePath, originalKubeconfig string + + // Using program commmand according to cliRunner in devfile + cliRunner := helper.GetCliRunner() // This is run after every Spec (It) var _ = BeforeEach(func() { SetDefaultEventuallyTimeout(10 * time.Minute) - namespace = helper.CreateRandProject() context = helper.CreateNewContext() - currentWorkingDirectory = helper.Getwd() - helper.Chdir(context) os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + originalKubeconfig = os.Getenv("KUBECONFIG") + 
helper.LocalKubeconfigSet(context) + namespace = cliRunner.CreateRandNamespaceProject() + currentWorkingDirectory = helper.Getwd() + helper.Chdir(context) }) // This is run after every Spec (It) var _ = AfterEach(func() { - helper.DeleteProject(namespace) + cliRunner.DeleteNamespaceProject(namespace) helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") }) Context("Enabling experimental preference should show a disclaimer", func() { It("checks that the experimental warning appears for create", func() { - helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") helper.CopyExample(filepath.Join("source", "nodejs"), context) // Check that it will contain the experimental mode output @@ -48,7 +52,14 @@ var _ = Describe("odo devfile create command tests", func() { Expect(helper.CmdShouldPass("odo", "create", "nodejs")).To(ContainSubstring(experimentalOutputMsg)) }) + }) + Context("Disabling experimental preference should show a disclaimer", func() { + JustBeforeEach(func() { + if os.Getenv("KUBERNETES") == "true" { + Skip("Skipping test because s2i image is not supported on Kubernetes cluster") + } + }) It("checks that the experimental warning does *not* appear when Experimental is set to false for create", func() { helper.CmdShouldPass("odo", "preference", "set", "Experimental", "false", "-f") helper.CopyExample(filepath.Join("source", "nodejs"), context) @@ -61,39 +72,75 @@ var _ = Describe("odo devfile create command tests", func() { Context("When executing odo create with devfile component type argument", func() { It("should successfully create the devfile component", func() { - helper.CmdShouldPass("odo", "create", "openLiberty") + helper.CmdShouldPass("odo", "create", "java-openliberty") + }) + + It("should fail to create the devfile componet with invalid component type", func() { + fakeComponentName := "fake-component" 
+ output := helper.CmdShouldFail("odo", "create", fakeComponentName) + var expectedString string + if os.Getenv("KUBERNETES") == "true" { + expectedString = "component type not found" + } else { + expectedString = "component type \"" + fakeComponentName + "\" not found" + } + helper.MatchAllInOutput(output, []string{expectedString}) }) }) Context("When executing odo create with devfile component type and component name arguments", func() { - It("should successfully create the devfile component", func() { + It("should successfully create the devfile component with valid component name", func() { componentName := helper.RandString(6) - helper.CmdShouldPass("odo", "create", "openLiberty", componentName) + helper.CmdShouldPass("odo", "create", "java-openliberty", componentName) + }) + + It("should fail to create the devfile component with component name that contains invalid character", func() { + componentName := "BAD@123" + output := helper.CmdShouldFail("odo", "create", "java-openliberty", componentName) + helper.MatchAllInOutput(output, []string{"Contain only lowercase alphanumeric characters or ‘-’"}) + }) + + It("should fail to create the devfile component with component name that contains all numeric values", func() { + componentName := "123456" + output := helper.CmdShouldFail("odo", "create", "java-openliberty", componentName) + helper.MatchAllInOutput(output, []string{"Must not contain all numeric values"}) + }) + + It("should fail to create the devfile component with componet name contains more than 63 characters", func() { + componentName := helper.RandString(64) + output := helper.CmdShouldFail("odo", "create", "java-openliberty", componentName) + helper.MatchAllInOutput(output, []string{"Contain at most 63 characters"}) }) }) Context("When executing odo create with devfile component type argument and --project flag", func() { It("should successfully create the devfile component", func() { componentNamespace := helper.RandString(6) - 
helper.CmdShouldPass("odo", "create", "openLiberty", "--project", componentNamespace) + helper.CmdShouldPass("odo", "create", "java-openliberty", "--project", componentNamespace) }) }) Context("When executing odo create with devfile component type argument and --registry flag", func() { - It("should successfully create the devfile component", func() { + It("should successfully create the devfile component if specified registry is valid", func() { componentRegistry := "DefaultDevfileRegistry" - helper.CmdShouldPass("odo", "create", "openLiberty", "--registry", componentRegistry) + helper.CmdShouldPass("odo", "create", "java-openliberty", "--registry", componentRegistry) + }) + + It("should fail to create the devfile component if specified registry is invalid", func() { + componentRegistry := "fake" + output := helper.CmdShouldFail("odo", "create", "java-openliberty", "--registry", componentRegistry) + helper.MatchAllInOutput(output, []string{"Registry fake doesn't exist, please specify a valid registry via --registry"}) }) }) Context("When executing odo create with devfile component type argument and --context flag", func() { It("should successfully create the devfile component in the context", func() { newContext := path.Join(context, "newContext") - devfilePath := filepath.Join(newContext, devfile) + devfilePath = filepath.Join(newContext, devfile) envFilePath := filepath.Join(newContext, envFile) helper.MakeDir(newContext) - helper.CmdShouldPass("odo", "create", "openLiberty", "--context", newContext) + helper.CmdShouldPass("odo", "create", "java-openliberty", "--context", newContext) output := util.CheckPathExists(devfilePath) Expect(output).Should(BeTrue()) output = util.CheckPathExists(envFilePath) @@ -102,45 +149,67 @@ var _ = Describe("odo devfile create command tests", func() { }) }) - Context("When executing odo create with devfile component name that contains unsupported character", func() { - It("should failed with devfile component name is not valid and 
prompt supported character", func() { - componentName := "BAD@123" - output := helper.CmdShouldFail("odo", "create", "openLiberty", componentName) - helper.MatchAllInOutput(output, []string{"Contain only lowercase alphanumeric characters or ‘-’"}) - }) - }) + Context("When executing odo create with existing devfile", func() { + Context("When devfile exists in user's working directory", func() { + JustBeforeEach(func() { + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", devfile), filepath.Join(context, devfile)) + }) - Context("When executing odo create with devfile component name that contains all numeric values", func() { - It("should failed with devfile component name is not valid and prompt container name must not contain all numeric values", func() { - componentName := "123456" - output := helper.CmdShouldFail("odo", "create", "openLiberty", componentName) - helper.MatchAllInOutput(output, []string{"Must not contain all numeric values"}) - }) - }) + It("should successfully create the devfile componet", func() { + helper.CmdShouldPass("odo", "create", "nodejs") + }) - Context("When executing odo create with devfile component name that contains more than 63 characters", func() { - It("should failed with devfile component name is not valid and prompt container name contains at most 63 characters", func() { - componentName := helper.RandString(64) - output := helper.CmdShouldFail("odo", "create", "openLiberty", componentName) - helper.MatchAllInOutput(output, []string{"Contain at most 63 characters"}) + It("should successfully create the devfile component with --devfile points to the same devfile", func() { + helper.CmdShouldPass("odo", "create", "nodejs", "--devfile", "./devfile.yaml") + }) + + It("should fail to create the devfile component with more than 1 arguments are passed in", func() { + helper.CmdShouldFail("odo", "create", "nodejs", "nodejs") + }) + + It("should fail to create the devfile component with --devfile points to 
different devfile", func() { + helper.CmdShouldFail("odo", "create", "nodejs", "--devfile", "/path/to/file") + }) }) - }) - Context("When executing odo create with an invalid devfile component", func() { - It("should fail with please run 'odo catalog list components'", func() { - fakeComponentName := "fake-component" - output := helper.CmdShouldFail("odo", "create", fakeComponentName) - expectedString := "\"" + fakeComponentName + "\" not found" - helper.MatchAllInOutput(output, []string{expectedString}) + Context("When devfile exists not in user's working directory and user specify the devfile path via --devfile", func() { + JustBeforeEach(func() { + newContext := path.Join(context, "newContext") + devfilePath = filepath.Join(newContext, devfile) + helper.MakeDir(newContext) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", devfile), devfilePath) + }) + + It("should successfully create the devfile component with valid file system path", func() { + helper.CmdShouldPass("odo", "create", "nodejs", "--devfile", devfilePath) + }) + + It("should successfully create the devfile component with valid specifies URL path", func() { + helper.CmdShouldPass("odo", "create", "nodejs", "--devfile", "https://raw.githubusercontent.com/elsony/devfile-registry/master/devfiles/nodejs/devfile.yaml") + }) + + It("should fail to create the devfile component with invalid file system path", func() { + helper.CmdShouldFail("odo", "create", "nodejs", "--devfile", "#@!") + }) + + It("should fail to create the devfile component with invalid URL path", func() { + helper.CmdShouldFail("odo", "create", "nodejs", "--devfile", "://www.example.com/") + }) + + It("should fail to create the devfile component with more than 1 arguments are passed in", func() { + helper.CmdShouldFail("odo", "create", "nodejs", "nodejs", "--devfile", devfilePath) + }) + + It("should fail to create the devfile component with --registry specified", func() { + helper.CmdShouldFail("odo", "create", 
"nodejs", "--devfile", devfilePath, "--registry", "DefaultDevfileRegistry") + }) }) }) Context("When executing odo create with devfile component and --downloadSource flag", func() { - It("should succesfully create the compoment and download the source", func() { - helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + It("should successfully create the component and download the source", func() { contextDevfile := helper.CreateNewContext() helper.Chdir(contextDevfile) - devfile := "devfile.yaml" helper.CmdShouldPass("odo", "create", "nodejs", "--downloadSource") expectedFiles := []string{"package.json", "package-lock.json", "README.md", devfile} Expect(helper.VerifyFilesExist(contextDevfile, expectedFiles)).To(Equal(true)) @@ -149,12 +218,18 @@ var _ = Describe("odo devfile create command tests", func() { }) }) + Context("When executing odo create with component with no devBuild command", func() { + It("should successfully create the devfile component", func() { + // Quarkus devfile has no devBuild command + output := helper.CmdShouldPass("odo", "create", "quarkus") + helper.MatchAllInOutput(output, []string{"Please use `odo push` command to create the component with source deployed"}) + }) + }) + Context("When executing odo create with devfile component and --downloadSource flag with a valid project", func() { - It("should succesfully create the compoment specified and download the source", func() { - helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + It("should successfully create the component specified and download the source", func() { contextDevfile := helper.CreateNewContext() helper.Chdir(contextDevfile) - devfile := "devfile.yaml" helper.CmdShouldPass("odo", "create", "nodejs", "--downloadSource=nodejs-web-app") expectedFiles := []string{"package.json", "package-lock.json", "README.md", devfile} Expect(helper.VerifyFilesExist(contextDevfile, expectedFiles)).To(Equal(true)) @@ -183,8 +258,7 @@ var _ = 
Describe("odo devfile create command tests", func() { // Currently these tests need interactive mode in order to set the name of the component. // Once this feature is added we can change these tests. //Context("When executing odo create with devfile component and --downloadSource flag with github type", func() { - // It("should succesfully create the compoment and download the source", func() { - // helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + // It("should successfully create the component and download the source", func() { // contextDevfile := helper.CreateNewContext() // helper.Chdir(contextDevfile) // devfile := "devfile.yaml" @@ -202,8 +276,7 @@ var _ = Describe("odo devfile create command tests", func() { //}) //Context("When executing odo create with devfile component and --downloadSource flag with zip type", func() { - // It("should create the compoment and download the source", func() { - // helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + // It("should create the component and download the source", func() { // contextDevfile := helper.CreateNewContext() // helper.Chdir(contextDevfile) // devfile := "devfile.yaml" @@ -222,4 +295,34 @@ var _ = Describe("odo devfile create command tests", func() { // helper.DeleteDir(contextDevfile) // }) //}) + + // Context("When executing odo create with devfile component, --downloadSource flag and sparseContextDir has a valid value", func() { + // It("should only extract the specified path in the sparseContextDir field", func() { + // contextDevfile := helper.CreateNewContext() + // helper.Chdir(contextDevfile) + // devfile := "devfile.yaml" + // helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-sparseCheckoutDir"), filepath.Join(contextDevfile, devfile)) + // componentNamespace := helper.RandString(6) + // helper.CmdShouldPass("odo", "create", "--downloadSource", "--project", componentNamespace) + // expectedFiles := 
[]string{"app.js", devfile} + // Expect(helper.VerifyFilesExist(contextDevfile, expectedFiles)).To(Equal(true)) + // helper.DeleteDir(contextDevfile) + // }) + // }) + + // Context("When executing odo create with devfile component, --downloadSource flag and sparseContextDir has an invalid value", func() { + // It("should fail and alert the user that the specified path in sparseContextDir does not exist", func() { + // contextDevfile := helper.CreateNewContext() + // helper.Chdir(contextDevfile) + // devfile := "devfile.yaml" + // devfilePath := filepath.Join(contextDevfile, devfile) + // helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-sparseCheckoutDir"), devfilePath) + // helper.ReplaceDevfileField(devfilePath, "sparseCheckoutDir", "/invalid/") + // componentNamespace := helper.RandString(6) + // output := helper.CmdShouldFail("odo", "create", "--downloadSource", "--project", componentNamespace) + // expectedString := "no files were unzipped, ensure that the project repo is not empty or that sparseCheckoutDir has a valid path" + // helper.MatchAllInOutput(output, []string{expectedString}) + // helper.DeleteDir(contextDevfile) + // }) + // }) }) diff --git a/tests/integration/devfile/cmd_devfile_debug_test.go b/tests/integration/devfile/cmd_devfile_debug_test.go new file mode 100644 index 00000000000..341542f9ac0 --- /dev/null +++ b/tests/integration/devfile/cmd_devfile_debug_test.go @@ -0,0 +1,180 @@ +package devfile + +import ( + "github.com/openshift/odo/pkg/util" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/openshift/odo/tests/helper" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("odo devfile debug command tests", func() { + var namespace, context, componentName, currentWorkingDirectory, projectDirPath, originalKubeconfig string + var projectDir = "/projectDir" + + // Using program command according to cliRunner in devfile + cliRunner := helper.GetCliRunner() + + // This is run after every Spec (It) + var _ = BeforeEach(func() { + SetDefaultEventuallyTimeout(10 * time.Minute) + context = helper.CreateNewContext() + os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) + + // Devfile push requires experimental mode to be set + helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + namespace = cliRunner.CreateRandNamespaceProject() + currentWorkingDirectory = helper.Getwd() + componentName = helper.RandString(6) + helper.Chdir(context) + projectDirPath = context + projectDir + }) + + // Clean up after the test + // This is run after every Spec (It) + var _ = AfterEach(func() { + cliRunner.DeleteNamespaceProject(namespace) + helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) + helper.DeleteDir(context) + os.Unsetenv("GLOBALODOCONFIG") + }) + + Context("odo debug on a nodejs:latest component", func() { + It("check that machine output debug information works", func() { + helper.CmdShouldPass("git", "clone", "https://github.com/che-samples/web-nodejs-sample.git", projectDirPath) + helper.Chdir(projectDirPath) + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), projectDirPath) + helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml") + helper.CmdShouldPass("odo", "push", "--debug") + + httpPort, err := util.HttpGetFreePort() + Expect(err).NotTo(HaveOccurred()) + freePort := 
strconv.Itoa(httpPort) + + stopChannel := make(chan bool) + go func() { + helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward", "--local-port", freePort) + }() + + // Make sure that the debug information output, outputs correctly. + // We do *not* check the json output since the debugProcessID will be different each time. + helper.WaitForCmdOut("odo", []string{"debug", "info", "-o", "json"}, 1, false, func(output string) bool { + if strings.Contains(output, `"kind": "OdoDebugInfo"`) && + strings.Contains(output, `"localPort": `+freePort) { + return true + } + return false + }) + + stopChannel <- true + }) + + It("should expect a ws connection when tried to connect on default debug port locally", func() { + helper.CmdShouldPass("git", "clone", "https://github.com/che-samples/web-nodejs-sample.git", projectDirPath) + helper.Chdir(projectDirPath) + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), projectDirPath) + helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml") + helper.CmdShouldPass("odo", "push") + helper.CmdShouldPass("odo", "push", "--debug") + + stopChannel := make(chan bool) + go func() { + helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward") + }() + + // 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET + // We are just using this to validate if nodejs agent is listening on the other side + helper.HttpWaitForWithStatus("http://localhost:5858", "WebSockets request was expected", 12, 5, 400) + stopChannel <- true + }) + + }) + + Context("odo debug info should work on a odo component", func() { + It("should start a debug session and run debug info on a running debug session", func() { + helper.CmdShouldPass("git", "clone", "https://github.com/che-samples/web-nodejs-sample.git", projectDirPath) + 
helper.Chdir(projectDirPath) + + helper.CmdShouldPass("odo", "create", "nodejs", "nodejs-cmp-"+namespace, "--project", namespace) + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), projectDirPath) + helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml") + helper.CmdShouldPass("odo", "push", "--debug") + + httpPort, err := util.HttpGetFreePort() + Expect(err).NotTo(HaveOccurred()) + freePort := strconv.Itoa(httpPort) + + stopChannel := make(chan bool) + go func() { + helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward", "--local-port", freePort) + }() + + // 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET + // We are just using this to validate if nodejs agent is listening on the other side + helper.HttpWaitForWithStatus("http://localhost:"+freePort, "WebSockets request was expected", 12, 5, 400) + runningString := helper.CmdShouldPass("odo", "debug", "info") + Expect(runningString).To(ContainSubstring(freePort)) + Expect(helper.ListFilesInDir(os.TempDir())).To(ContainElement(namespace + "-nodejs-cmp-" + namespace + "-odo-debug.json")) + stopChannel <- true + }) + + It("should start a debug session and run debug info on a closed debug session", func() { + helper.CmdShouldPass("git", "clone", "https://github.com/che-samples/web-nodejs-sample.git", projectDirPath) + helper.Chdir(projectDirPath) + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), projectDirPath) + helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml") + helper.CmdShouldPass("odo", "push", "--debug") + + httpPort, err := util.HttpGetFreePort() + Expect(err).NotTo(HaveOccurred()) + freePort := strconv.Itoa(httpPort) + + stopChannel := make(chan bool) + go func() { + helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward", "--local-port", 
freePort) + }() + + // 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET + // We are just using this to validate if nodejs agent is listening on the other side + helper.HttpWaitForWithStatus("http://localhost:"+freePort, "WebSockets request was expected", 12, 5, 400) + runningString := helper.CmdShouldPass("odo", "debug", "info") + Expect(runningString).To(ContainSubstring(freePort)) + stopChannel <- true + failString := helper.CmdShouldFail("odo", "debug", "info") + Expect(failString).To(ContainSubstring("not running")) + + // according to https://golang.org/pkg/os/#Signal On Windows, sending os.Interrupt to a process with os.Process.Signal is not implemented + // discussion on the go repo https://github.com/golang/go/issues/6720 + // session.Interrupt() will not work as it internally uses syscall.SIGINT + // thus debug port-forward won't stop running + // the solution is to use syscall.SIGKILL for windows but this will kill the process immediately + // and the cleaning and closing tasks for debug port-forward won't run and the debug info file won't be cleared + // thus we skip this last check + // CTRL_C_EVENTS from the terminal works fine https://github.com/golang/go/issues/6720#issuecomment-66087737 + // here's a hack to generate the event https://golang.org/cl/29290044 + // but the solution is unacceptable https://github.com/golang/go/issues/6720#issuecomment-66087749 + if runtime.GOOS != "windows" { + Expect(helper.ListFilesInDir(os.TempDir())).To(Not(ContainElement(namespace + "-app" + "-nodejs-cmp-" + namespace + "-odo-debug.json"))) + } + + }) + }) +}) diff --git a/tests/integration/devfile/cmd_devfile_delete_test.go b/tests/integration/devfile/cmd_devfile_delete_test.go index 5e67256e8eb..f31d841cbcc 100644 --- a/tests/integration/devfile/cmd_devfile_delete_test.go +++ b/tests/integration/devfile/cmd_devfile_delete_test.go @@ -12,33 +12,35 @@ import ( ) var _ = Describe("odo devfile delete command tests", 
func() { - var namespace, context, currentWorkingDirectory, componentName string + var namespace, context, currentWorkingDirectory, componentName, originalKubeconfig string - // TODO: all oc commands in all devfile related test should get replaced by kubectl - // TODO: to goal is not to use "oc" - oc := helper.NewOcRunner("oc") + // Using program commmand according to cliRunner in devfile + cliRunner := helper.GetCliRunner() // This is run after every Spec (It) var _ = BeforeEach(func() { SetDefaultEventuallyTimeout(10 * time.Minute) - namespace = helper.CreateRandProject() context = helper.CreateNewContext() - currentWorkingDirectory = helper.Getwd() - componentName = helper.RandString(6) - - helper.Chdir(context) - os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) // Devfile commands require experimental mode to be set helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + namespace = cliRunner.CreateRandNamespaceProject() + currentWorkingDirectory = helper.Getwd() + componentName = helper.RandString(6) + helper.Chdir(context) }) // Clean up after the test // This is run after every Spec (It) var _ = AfterEach(func() { - helper.DeleteProject(namespace) + cliRunner.DeleteNamespaceProject(namespace) helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") }) @@ -47,20 +49,19 @@ var _ = Describe("odo devfile delete command tests", func() { It("should delete the component created from the devfile and also the owned resources", func() { helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) - helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, 
"devfile.yaml")) helper.CmdShouldPass("odo", "url", "create", "example", "--host", "1.2.3.4.nip.io") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) - helper.CmdShouldPass("odo", "delete", "--devfile", "devfile.yaml", "--project", namespace, "-f") + helper.CmdShouldPass("odo", "delete", "--project", namespace, "-f") - oc.WaitAndCheckForExistence("deployments", namespace, 1) - oc.WaitAndCheckForExistence("pods", namespace, 1) - oc.WaitAndCheckForExistence("services", namespace, 1) - oc.WaitAndCheckForExistence("ingress", namespace, 1) + resourceTypes := []string{"deployments", "pods", "services", "ingress"} + for _, resourceType := range resourceTypes { + cliRunner.WaitAndCheckForExistence(resourceType, namespace, 1) + } }) }) @@ -72,13 +73,13 @@ var _ = Describe("odo devfile delete command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) helper.CmdShouldPass("odo", "url", "create", "example", "--host", "1.2.3.4.nip.io", "--context", context) - helper.CmdShouldPass("odo", "delete", "--devfile", "devfile.yaml", "--project", namespace, "-f", "--all") + helper.CmdShouldPass("odo", "delete", "--project", namespace, "-f", "--all") - oc.WaitAndCheckForExistence("deployments", namespace, 1) + cliRunner.WaitAndCheckForExistence("deployments", namespace, 1) files := helper.ListFilesInDir(context) Expect(files).To(Not(ContainElement(".odo"))) diff --git a/tests/integration/devfile/cmd_devfile_push_test.go b/tests/integration/devfile/cmd_devfile_push_test.go index 4fdeb66a0f8..cfa43144b2c 100644 --- 
a/tests/integration/devfile/cmd_devfile_push_test.go +++ b/tests/integration/devfile/cmd_devfile_push_test.go @@ -14,34 +14,36 @@ import ( ) var _ = Describe("odo devfile push command tests", func() { - var namespace, context, cmpName, currentWorkingDirectory string + var namespace, context, cmpName, currentWorkingDirectory, originalKubeconfig string var sourcePath = "/projects/nodejs-web-app" - // TODO: all oc commands in all devfile related test should get replaced by kubectl - // TODO: to goal is not to use "oc" - oc := helper.NewOcRunner("oc") + // Using program commmand according to cliRunner in devfile + cliRunner := helper.GetCliRunner() // This is run after every Spec (It) var _ = BeforeEach(func() { SetDefaultEventuallyTimeout(10 * time.Minute) - namespace = helper.CreateRandProject() context = helper.CreateNewContext() - currentWorkingDirectory = helper.Getwd() - cmpName = helper.RandString(6) - - helper.Chdir(context) - os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) // Devfile push requires experimental mode to be set helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + namespace = cliRunner.CreateRandNamespaceProject() + currentWorkingDirectory = helper.Getwd() + cmpName = helper.RandString(6) + helper.Chdir(context) }) // Clean up after the test // This is run after every Spec (It) var _ = AfterEach(func() { - helper.DeleteProject(namespace) + cliRunner.DeleteNamespaceProject(namespace) helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") }) @@ -55,15 +57,15 @@ var _ = Describe("odo devfile push command tests", func() { helper.RenameFile("devfile.yaml", "devfile-old.yaml") helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-no-endpoints.yaml"), filepath.Join(context, 
"devfile.yaml")) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) - output := oc.GetServices(namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) + output := cliRunner.GetServices(namespace) Expect(output).NotTo(ContainSubstring(cmpName)) helper.RenameFile("devfile-old.yaml", "devfile.yaml") - output = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + output = helper.CmdShouldPass("odo", "push", "--project", namespace) Expect(output).To(ContainSubstring("Changes successfully pushed to component")) - output = oc.GetServices(namespace) + output = cliRunner.GetServices(namespace) Expect(output).To(ContainSubstring(cmpName)) }) @@ -73,12 +75,12 @@ var _ = Describe("odo devfile push command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + output := helper.CmdShouldPass("odo", "push", "--project", namespace) Expect(output).To(ContainSubstring("Changes successfully pushed to component")) // update devfile and push again helper.ReplaceString("devfile.yaml", "name: FOO", "name: BAR") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) }) It("checks that odo push works outside of the context directory", func() { @@ -89,7 +91,7 @@ var _ = Describe("odo devfile push command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--context", 
context) + output := helper.CmdShouldPass("odo", "push", "--context", context) Expect(output).To(ContainSubstring("Changes successfully pushed to component")) }) @@ -97,27 +99,42 @@ var _ = Describe("odo devfile push command tests", func() { utils.ExecPushToTestFileChanges(context, cmpName, namespace) }) + It("checks that odo push with -o json displays machine readable JSON event output", func() { + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + output := helper.CmdShouldPass("odo", "push", "-o", "json", "--project", namespace) + utils.AnalyzePushConsoleOutput(output) + + // update devfile and push again + helper.ReplaceString("devfile.yaml", "name: FOO", "name: BAR") + output = helper.CmdShouldPass("odo", "push", "-o", "json", "--project", namespace) + utils.AnalyzePushConsoleOutput(output) + + }) + It("should be able to create a file, push, delete, then push again propagating the deletions", func() { newFilePath := filepath.Join(context, "foobar.txt") newDirPath := filepath.Join(context, "testdir") utils.ExecPushWithNewFileAndDir(context, cmpName, namespace, newFilePath, newDirPath) // Check to see if it's been pushed (foobar.txt abd directory testdir) - podName := oc.GetRunningPodNameByComponent(cmpName, namespace) + podName := cliRunner.GetRunningPodNameByComponent(cmpName, namespace) - stdOut := oc.ExecListDir(podName, namespace, sourcePath) - Expect(stdOut).To(ContainSubstring(("foobar.txt"))) - Expect(stdOut).To(ContainSubstring(("testdir"))) + stdOut := cliRunner.ExecListDir(podName, namespace, sourcePath) + helper.MatchAllInOutput(stdOut, []string{"foobar.txt", "testdir"}) // Now we delete the file and dir and push helper.DeleteDir(newFilePath) helper.DeleteDir(newDirPath) - 
helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace, "-v4") + helper.CmdShouldPass("odo", "push", "--project", namespace, "-v4") // Then check to see if it's truly been deleted - stdOut = oc.ExecListDir(podName, namespace, sourcePath) - Expect(stdOut).To(Not(ContainSubstring(("foobar.txt")))) - Expect(stdOut).To(Not(ContainSubstring(("testdir")))) + stdOut = cliRunner.ExecListDir(podName, namespace, sourcePath) + helper.DontMatchAllInOutput(stdOut, []string{"foobar.txt", "testdir"}) }) It("should delete the files from the container if its removed locally", func() { @@ -126,13 +143,13 @@ var _ = Describe("odo devfile push command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) // Check to see if it's been pushed (foobar.txt abd directory testdir) - podName := oc.GetRunningPodNameByComponent(cmpName, namespace) + podName := cliRunner.GetRunningPodNameByComponent(cmpName, namespace) var statErr error - oc.CheckCmdOpInRemoteDevfilePod( + cliRunner.CheckCmdOpInRemoteDevfilePod( podName, "", namespace, @@ -144,9 +161,9 @@ var _ = Describe("odo devfile push command tests", func() { ) Expect(statErr).ToNot(HaveOccurred()) Expect(os.Remove(filepath.Join(context, "app", "app.js"))).NotTo(HaveOccurred()) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) - oc.CheckCmdOpInRemoteDevfilePod( + cliRunner.CheckCmdOpInRemoteDevfilePod( podName, "", namespace, @@ -168,11 +185,11 @@ var _ = Describe("odo devfile push command tests", func() { utils.ExecDefaultDevfileCommands(context, cmpName, namespace) // Check to see 
if it's been pushed (foobar.txt abd directory testdir) - podName := oc.GetRunningPodNameByComponent(cmpName, namespace) + podName := cliRunner.GetRunningPodNameByComponent(cmpName, namespace) var statErr error var cmdOutput string - oc.CheckCmdOpInRemoteDevfilePod( + cliRunner.CheckCmdOpInRemoteDevfilePod( podName, "runtime", namespace, @@ -191,41 +208,49 @@ var _ = Describe("odo devfile push command tests", func() { helper.CmdShouldPass("odo", "create", "java-spring-boot", "--project", namespace, cmpName) helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile-init.yaml"), filepath.Join(context, "devfile.yaml")) - - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - Expect(output).To(ContainSubstring("Executing devinit command \"echo hello")) - Expect(output).To(ContainSubstring("Executing devbuild command \"/artifacts/bin/build-container-full.sh\"")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + helper.CopyExampleDevFile(filepath.Join("source", "devfilesV1", "springboot", "devfile-init.yaml"), filepath.Join(context, "devfile.yaml")) + + output := helper.CmdShouldPass("odo", "push", "--namespace", namespace) + helper.MatchAllInOutput(output, []string{ + "Executing devinit command \"echo hello", + "Executing devbuild command \"/artifacts/bin/build-container-full.sh\"", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) }) It("should execute devinit and devrun commands if present", func() { helper.CmdShouldPass("odo", "create", "java-spring-boot", "--project", namespace, cmpName) helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile-init-without-build.yaml"), filepath.Join(context, "devfile.yaml")) + 
helper.CopyExampleDevFile(filepath.Join("source", "devfilesV1", "springboot", "devfile-init-without-build.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - Expect(output).To(ContainSubstring("Executing devinit command \"echo hello")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + output := helper.CmdShouldPass("odo", "push", "--namespace", namespace) + helper.MatchAllInOutput(output, []string{ + "Executing devinit command \"echo hello", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) }) It("should only execute devinit command once if component is already created", func() { helper.CmdShouldPass("odo", "create", "java-spring-boot", "--project", namespace, cmpName) helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile-init.yaml"), filepath.Join(context, "devfile.yaml")) + helper.CopyExampleDevFile(filepath.Join("source", "devfilesV1", "springboot", "devfile-init.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - Expect(output).To(ContainSubstring("Executing devinit command \"echo hello")) - Expect(output).To(ContainSubstring("Executing devbuild command \"/artifacts/bin/build-container-full.sh\"")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + output := helper.CmdShouldPass("odo", "push", "--namespace", namespace) + helper.MatchAllInOutput(output, []string{ + "Executing devinit command \"echo hello", + "Executing devbuild command \"/artifacts/bin/build-container-full.sh\"", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) // Need to force so build and run get triggered again with the component 
already created. - output = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace, "-f") + output = helper.CmdShouldPass("odo", "push", "--namespace", namespace, "-f") Expect(output).NotTo(ContainSubstring("Executing devinit command \"echo hello")) - Expect(output).To(ContainSubstring("Executing devbuild command \"/artifacts/bin/build-container-full.sh\"")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + helper.MatchAllInOutput(output, []string{ + "Executing devbuild command \"/artifacts/bin/build-container-full.sh\"", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) }) It("should be able to handle a missing devinit command", func() { @@ -234,10 +259,12 @@ var _ = Describe("odo devfile push command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-without-devinit.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) + output := helper.CmdShouldPass("odo", "push", "--namespace", namespace) Expect(output).NotTo(ContainSubstring("Executing devinit command")) - Expect(output).To(ContainSubstring("Executing devbuild command \"npm install\"")) - Expect(output).To(ContainSubstring("Executing devrun command \"nodemon app.js\"")) + helper.MatchAllInOutput(output, []string{ + "Executing devbuild command \"npm install\"", + "Executing devrun command \"nodemon app.js\"", + }) }) It("should be able to handle a missing devbuild command", func() { @@ -266,31 +293,19 @@ var _ = Describe("odo devfile push command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-volumes.yaml"), filepath.Join(context, 
"devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - Expect(output).To(ContainSubstring("Executing devinit command")) - Expect(output).To(ContainSubstring("Executing devbuild command")) - Expect(output).To(ContainSubstring("Executing devrun command")) + output := helper.CmdShouldPass("odo", "push", "--namespace", namespace) + helper.MatchAllInOutput(output, []string{ + "Executing devbuild command", + "Executing devrun command", + }) // Check to see if it's been pushed (foobar.txt abd directory testdir) - podName := oc.GetRunningPodNameByComponent(cmpName, namespace) + podName := cliRunner.GetRunningPodNameByComponent(cmpName, namespace) var statErr error var cmdOutput string - oc.CheckCmdOpInRemoteDevfilePod( - podName, - "runtime", - namespace, - []string{"cat", "/data/myfile-init.log"}, - func(cmdOp string, err error) bool { - cmdOutput = cmdOp - statErr = err - return true - }, - ) - Expect(statErr).ToNot(HaveOccurred()) - Expect(cmdOutput).To(ContainSubstring("init")) - oc.CheckCmdOpInRemoteDevfilePod( + cliRunner.CheckCmdOpInRemoteDevfilePod( podName, "runtime2", namespace, @@ -304,7 +319,7 @@ var _ = Describe("odo devfile push command tests", func() { Expect(statErr).ToNot(HaveOccurred()) Expect(cmdOutput).To(ContainSubstring("hello")) - oc.CheckCmdOpInRemoteDevfilePod( + cliRunner.CheckCmdOpInRemoteDevfilePod( podName, "runtime2", namespace, @@ -319,7 +334,7 @@ var _ = Describe("odo devfile push command tests", func() { volumesMatched := false // check the volume name and mount paths for the containers - volNamesAndPaths := oc.GetVolumeMountNamesandPathsFromContainer(cmpName, "runtime", namespace) + volNamesAndPaths := cliRunner.GetVolumeMountNamesandPathsFromContainer(cmpName, "runtime", namespace) volNamesAndPathsArr := strings.Fields(volNamesAndPaths) for _, volNamesAndPath := range volNamesAndPathsArr { volNamesAndPathArr := strings.Split(volNamesAndPath, ":") diff --git 
a/tests/integration/devfile/cmd_devfile_registry_test.go b/tests/integration/devfile/cmd_devfile_registry_test.go index 85c27e0297c..1188611df45 100644 --- a/tests/integration/devfile/cmd_devfile_registry_test.go +++ b/tests/integration/devfile/cmd_devfile_registry_test.go @@ -11,36 +11,34 @@ import ( ) var _ = Describe("odo devfile registry command tests", func() { - var project string - var context string - var currentWorkingDirectory string + var project, context, currentWorkingDirectory, originalKubeconfig string const registryName string = "RegistryName" const addRegistryURL string = "https://raw.githubusercontent.com/GeekArthur/registry/master" const updateRegistryURL string = "http://www.example.com/update" + // Using program commmand according to cliRunner in devfile + cliRunner := helper.GetCliRunner() + // This is run after every Spec (It) var _ = BeforeEach(func() { SetDefaultEventuallyTimeout(10 * time.Minute) context = helper.CreateNewContext() os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") - if os.Getenv("KUBERNETES") == "true" { - project = helper.CreateRandNamespace(context) - } else { - project = helper.CreateRandProject() - } + + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + project = cliRunner.CreateRandNamespaceProject() currentWorkingDirectory = helper.Getwd() helper.Chdir(context) }) // This is run after every Spec (It) var _ = AfterEach(func() { - if os.Getenv("KUBERNETES") == "true" { - helper.DeleteNamespace(project) - } else { - helper.DeleteProject(project) - } + cliRunner.DeleteNamespaceProject(project) helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) helper.DeleteDir(context) }) @@ -49,6 +47,14 @@ var _ = Describe("odo devfile registry command tests", func() { output := helper.CmdShouldPass("odo", "registry", "list") 
helper.MatchAllInOutput(output, []string{"CheDevfileRegistry", "DefaultDevfileRegistry"}) }) + + It("Should fail with an error with no registries", func() { + helper.CmdShouldPass("odo", "registry", "delete", "DefaultDevfileRegistry", "-f") + helper.CmdShouldPass("odo", "registry", "delete", "CheDevfileRegistry", "-f") + output := helper.CmdShouldFail("odo", "registry", "list") + helper.MatchAllInOutput(output, []string{"No devfile registries added to the configuration. Refer `odo registry add -h` to add one"}) + + }) }) Context("When executing registry commands with the registry is not present", func() { @@ -89,5 +95,6 @@ var _ = Describe("odo devfile registry command tests", func() { helper.CmdShouldPass("odo", "registry", "delete", registryName, "-f") helper.CmdShouldFail("odo", "create", "maven", "--registry", registryName) }) + }) }) diff --git a/tests/integration/devfile/cmd_devfile_url_test.go b/tests/integration/devfile/cmd_devfile_url_test.go index f430f93c2c6..a976bd8d0a3 100644 --- a/tests/integration/devfile/cmd_devfile_url_test.go +++ b/tests/integration/devfile/cmd_devfile_url_test.go @@ -44,7 +44,6 @@ var _ = Describe("odo devfile url command tests", func() { Context("Listing urls", func() { It("should list url after push", func() { - var stdout string url1 := helper.RandString(5) host := helper.RandString(5) + ".com" @@ -53,8 +52,11 @@ var _ = Describe("odo devfile url command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - stdout = helper.CmdShouldFail("odo", "url", "list") - Expect(stdout).To(ContainSubstring("no URLs found")) + stdout := helper.CmdShouldFail("odo", "url", "list") + helper.MatchAllInOutput(stdout, []string{ + "no URLs found", + "Refer `odo url create -h` to add one", + }) stdout = helper.CmdShouldFail("odo", "url", "create", url1, "--port", "8080") 
Expect(stdout).To(ContainSubstring("is not exposed")) @@ -63,7 +65,7 @@ var _ = Describe("odo devfile url command tests", func() { Expect(stdout).To(ContainSubstring("host must be provided")) helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") helper.WaitForCmdOut("odo", []string{"url", "list"}, 1, false, func(output string) bool { if strings.Contains(output, url1) { Expect(output).Should(ContainSubstring(url1 + "." + host)) @@ -72,13 +74,13 @@ var _ = Describe("odo devfile url command tests", func() { return false }) helper.CmdShouldPass("odo", "url", "delete", url1, "-f") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) stdout = helper.CmdShouldFail("odo", "url", "list") Expect(stdout).To(ContainSubstring("no URLs found")) }) - It("should be able to list url in machine readable json format", func() { + It("should be able to list ingress url in machine readable json format", func() { url1 := helper.RandString(5) host := helper.RandString(5) + ".com" @@ -88,11 +90,11 @@ var _ = Describe("odo devfile url command tests", func() { helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) // odo url list -o json helper.WaitForCmdOut("odo", []string{"url", "list", "-o", "json"}, 1, true, func(output string) bool { - desiredURLListJSON := 
fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"Ingress","apiVersion":"extensions/v1beta1","metadata":{"name":"%s","creationTimestamp":null},"spec":{"rules":[{"host":"%s","http":{"paths":[{"path":"/","backend":{"serviceName":"%s","servicePort":3000}}]}}]},"status":{"loadBalancer":{}}}]}`, url1, url1+"."+host, componentName) + desiredURLListJSON := fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"%s","creationTimestamp":null},"spec":{"host":"%s","port":3000,"secure": false,"kind":"ingress"},"status":{"state":"Pushed"}}]}`, url1, url1+"."+host) if strings.Contains(output, url1) { Expect(desiredURLListJSON).Should(MatchJSON(output)) return true @@ -100,11 +102,63 @@ var _ = Describe("odo devfile url command tests", func() { return false }) }) + + It("should list ingress url with appropriate state", func() { + url1 := helper.RandString(5) + url2 := helper.RandString(5) + host := helper.RandString(5) + ".com" + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--secure", "--ingress") + helper.CmdShouldPass("odo", "push") + helper.CmdShouldPass("odo", "url", "create", url2, "--port", "3000", "--host", host, "--ingress") + stdout := helper.CmdShouldPass("odo", "url", "list") + helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "true", "ingress"}) + helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "ingress"}) + + helper.CmdShouldPass("odo", "url", "delete", url1, "-f") + stdout = helper.CmdShouldPass("odo", "url", "list") + helper.MatchAllInOutput(stdout, 
[]string{url1, "Locally Deleted", "true", "ingress"}) + helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "ingress"}) + }) + + It("should list route and ingress urls with appropriate state", func() { + if os.Getenv("KUBERNETES") == "true" { + Skip("This is a OpenShift specific scenario, skipping") + } + url1 := helper.RandString(5) + url2 := helper.RandString(5) + ingressurl := helper.RandString(5) + host := helper.RandString(5) + ".com" + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--secure") + helper.CmdShouldPass("odo", "url", "create", ingressurl, "--port", "3000", "--host", host, "--ingress") + helper.CmdShouldPass("odo", "push", "--project", namespace) + helper.CmdShouldPass("odo", "url", "create", url2, "--port", "3000") + stdout := helper.CmdShouldPass("odo", "url", "list", "--context", context) + helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "true", "route"}) + helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "route"}) + helper.MatchAllInOutput(stdout, []string{ingressurl, "Pushed", "false", "ingress"}) + + helper.CmdShouldPass("odo", "url", "delete", url1, "-f") + stdout = helper.CmdShouldPass("odo", "url", "list", "--context", context) + helper.MatchAllInOutput(stdout, []string{url1, "Locally Deleted", "true", "route"}) + helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "route"}) + helper.MatchAllInOutput(stdout, []string{ingressurl, "Pushed", "false", "ingress"}) + }) }) Context("Creating urls", func() { It("should create a secure URL", func() { - var stdout string url1 := helper.RandString(5) host := helper.RandString(5) + ".com" @@ -115,18 
+169,18 @@ var _ = Describe("odo devfile url command tests", func() { helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--secure", "--ingress") - stdout = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + stdout := helper.CmdShouldPass("odo", "push", "--project", namespace) helper.MatchAllInOutput(stdout, []string{"https:", url1 + "." + host}) stdout = helper.CmdShouldPass("odo", "url", "list") helper.MatchAllInOutput(stdout, []string{"https:", url1 + "." + host, "true"}) helper.CmdShouldPass("odo", "url", "delete", url1, "-f") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) + helper.CmdShouldPass("odo", "push", "--project", namespace) stdout = helper.CmdShouldFail("odo", "url", "list") Expect(stdout).To(ContainSubstring("no URLs found")) }) - It("create with now flag should pass", func() { + It("create and delete with now flag should pass", func() { var stdout string url1 := helper.RandString(5) host := helper.RandString(5) + ".com" @@ -136,8 +190,10 @@ var _ = Describe("odo devfile url command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - stdout = helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--now", "--ingress", "--devfile", "devfile.yaml") + stdout = helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--now", "--ingress") helper.MatchAllInOutput(stdout, []string{"URL " + url1 + " created for component", "http:", url1 + "." 
+ host}) + stdout = helper.CmdShouldPass("odo", "url", "delete", url1, "--now", "-f") + helper.MatchAllInOutput(stdout, []string{"URL " + url1 + " successfully deleted", "Applying URL changes"}) }) It("should create a automatically route on a openShift cluster", func() { @@ -155,20 +211,18 @@ var _ = Describe("odo devfile url command tests", func() { helper.CmdShouldPass("odo", "url", "create", url1) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - pushStdOut := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - Expect(pushStdOut).NotTo(ContainSubstring("successfully deleted")) - Expect(pushStdOut).NotTo(ContainSubstring("created")) + helper.CmdShouldPass("odo", "push", "--namespace", namespace) + pushStdOut := helper.CmdShouldPass("odo", "push", "--namespace", namespace) + helper.DontMatchAllInOutput(pushStdOut, []string{"successfully deleted", "created"}) Expect(pushStdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required")) output := helper.CmdShouldPass("oc", "get", "routes", "--namespace", namespace) Expect(output).Should(ContainSubstring(url1)) helper.CmdShouldPass("odo", "url", "delete", url1, "-f") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - pushStdOut = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--namespace", namespace) - Expect(pushStdOut).NotTo(ContainSubstring("successfully deleted")) - Expect(pushStdOut).NotTo(ContainSubstring("created")) + helper.CmdShouldPass("odo", "push", "--namespace", namespace) + pushStdOut = helper.CmdShouldPass("odo", "push", "--namespace", namespace) + helper.DontMatchAllInOutput(pushStdOut, []string{"successfully deleted", "created"}) Expect(pushStdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required")) output = helper.CmdShouldPass("oc", "get", "routes", "--namespace", namespace) @@ -203,25 +257,22 @@ var 
_ = Describe("odo devfile url command tests", func() { helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) - stdOut = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) - Expect(stdOut).NotTo(ContainSubstring("successfully deleted")) - Expect(stdOut).NotTo(ContainSubstring("created")) + helper.CmdShouldPass("odo", "push", "--project", namespace) + stdOut = helper.CmdShouldPass("odo", "push", "--project", namespace) + helper.DontMatchAllInOutput(stdOut, []string{"successfully deleted", "created"}) Expect(stdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required")) helper.CmdShouldPass("odo", "url", "delete", url1, "-f") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) - stdOut = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) - Expect(stdOut).NotTo(ContainSubstring("successfully deleted")) - Expect(stdOut).NotTo(ContainSubstring("created")) + helper.CmdShouldPass("odo", "push", "--project", namespace) + stdOut = helper.CmdShouldPass("odo", "push", "--project", namespace) + helper.DontMatchAllInOutput(stdOut, []string{"successfully deleted", "created"}) Expect(stdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required")) }) }) Context("Describing urls", func() { - It("should describe appropriate URL and error messages", func() { - var stdout string + It("should describe appropriate Ingress URLs", func() { url1 := helper.RandString(5) host := helper.RandString(5) + ".com" @@ -232,18 +283,49 @@ var _ = Describe("odo devfile url command tests", func() { helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress") - stdout = helper.CmdShouldFail("odo", "url", "describe", url1) - helper.MatchAllInOutput(stdout, []string{url1, 
"exists in local", "odo push"}) + stdout := helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1 + "." + host, "Not Pushed", "false", "ingress", "odo push"}) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml", "--project", namespace) - helper.WaitForCmdOut("odo", []string{"url", "describe", url1}, 1, false, func(output string) bool { - if strings.Contains(output, url1) { - Expect(output).Should(ContainSubstring(url1 + "." + host)) - return true - } - return false - }) + helper.CmdShouldPass("odo", "push", "--project", namespace) + stdout = helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1 + "." + host, "Pushed", "false", "ingress"}) + helper.CmdShouldPass("odo", "url", "delete", url1, "-f") + stdout = helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1 + "." + host, "Locally Deleted", "false", "ingress"}) + + helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--secure", "--ingress") + helper.CmdShouldPass("odo", "push", "--project", namespace) + stdout = helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1 + "." 
+ host, "Pushed", "true", "ingress"}) + }) + + It("should describe appropriate Route URLs", func() { + if os.Getenv("KUBERNETES") == "true" { + Skip("This is a OpenShift specific scenario, skipping") + } + url1 := helper.RandString(5) + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000") + + stdout := helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1, "Not Pushed", "false", "route", "odo push"}) + + helper.CmdShouldPass("odo", "push", "--project", namespace) + stdout = helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "false", "route"}) helper.CmdShouldPass("odo", "url", "delete", url1, "-f") + stdout = helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1, "Locally Deleted", "false", "route"}) + + helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--secure") + helper.CmdShouldPass("odo", "push", "--project", namespace) + stdout = helper.CmdShouldPass("odo", "url", "describe", url1) + helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "true", "route"}) }) }) diff --git a/tests/integration/devfile/cmd_devfile_watch_test.go b/tests/integration/devfile/cmd_devfile_watch_test.go index 71c34a7db98..cf40bc5041f 100644 --- a/tests/integration/devfile/cmd_devfile_watch_test.go +++ b/tests/integration/devfile/cmd_devfile_watch_test.go @@ -9,12 +9,14 @@ import ( . 
"github.com/onsi/gomega" "github.com/openshift/odo/tests/helper" + "github.com/openshift/odo/tests/integration/devfile/utils" ) var _ = Describe("odo devfile watch command tests", func() { - var namespace string - var context string - var currentWorkingDirectory string + var namespace, context, cmpName, currentWorkingDirectory, originalKubeconfig string + + // Using program commmand according to cliRunner in devfile + cliRunner := helper.GetCliRunner() // Setup up state for each test spec // create new project (not set as active) and new context directory for each test spec @@ -23,36 +25,30 @@ var _ = Describe("odo devfile watch command tests", func() { SetDefaultEventuallyTimeout(10 * time.Minute) context = helper.CreateNewContext() os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) - if os.Getenv("KUBERNETES") == "true" { - homeDir := helper.GetUserHomeDir() - kubeConfigFile := helper.CopyKubeConfigFile(filepath.Join(homeDir, ".kube", "config"), filepath.Join(context, "config")) - namespace = helper.CreateRandNamespace(kubeConfigFile) - } else { - namespace = helper.CreateRandProject() - } + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + namespace = cliRunner.CreateRandNamespaceProject() currentWorkingDirectory = helper.Getwd() + cmpName = helper.RandString(6) helper.Chdir(context) + + // Set experimental mode to true + helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") }) // Clean up after the test // This is run after every Spec (It) var _ = AfterEach(func() { - if os.Getenv("KUBERNETES") == "true" { - helper.DeleteNamespace(namespace) - os.Unsetenv("KUBECONFIG") - } else { - helper.DeleteProject(namespace) - } + cliRunner.DeleteNamespaceProject(namespace) helper.Chdir(currentWorkingDirectory) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") }) Context("when running help for watch command", 
func() { It("should display the help", func() { - // Devfile push requires experimental mode to be set - helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") - appHelp := helper.CmdShouldPass("odo", "watch", "-h") Expect(appHelp).To(ContainSubstring("Watch for changes")) }) @@ -60,9 +56,6 @@ var _ = Describe("odo devfile watch command tests", func() { Context("when executing watch without pushing a devfile component", func() { It("should fail", func() { - // Devfile push requires experimental mode to be set - helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") - cmpName := helper.RandString(6) helper.Chdir(currentWorkingDirectory) helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, "--context", context, cmpName) output := helper.CmdShouldFail("odo", "watch", "--context", context) @@ -70,20 +63,43 @@ var _ = Describe("odo devfile watch command tests", func() { }) }) - Context("when executing watch without a valid devfile", func() { - It("should fail", func() { - // Devfile push requires experimental mode to be set - helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") - output := helper.CmdShouldFail("odo", "watch", "--devfile", "fake-devfile.yaml") - Expect(output).To(ContainSubstring("The current directory does not represent an odo component")) + Context("when executing odo watch after odo push", func() { + It("should listen for file changes", func() { + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + output := helper.CmdShouldPass("odo", "push", "--project", namespace) + Expect(output).To(ContainSubstring("Changes successfully pushed to component")) + + watchFlag := "" + odoV2Watch := utils.OdoV2Watch{ + CmpName: cmpName, + 
StringsToBeMatched: []string{"Executing devbuild command", "Executing devrun command"}, + } + // odo watch and validate + utils.OdoWatch(utils.OdoV1Watch{}, odoV2Watch, namespace, context, watchFlag, cliRunner, "kube") }) }) - Context("when executing odo watch with devfile flag without experimental mode", func() { - It("should fail", func() { - helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), context) - output := helper.CmdShouldFail("odo", "watch", "--devfile", filepath.Join(context, "devfile.yaml")) - Expect(output).To(ContainSubstring("Error: unknown flag: --devfile")) + Context("when executing odo watch after odo push with custom commands", func() { + It("should listen for file changes", func() { + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + output := helper.CmdShouldPass("odo", "push", "--build-command", "build", "--run-command", "run", "--project", namespace) + Expect(output).To(ContainSubstring("Changes successfully pushed to component")) + + watchFlag := "--build-command build --run-command run" + odoV2Watch := utils.OdoV2Watch{ + CmpName: cmpName, + StringsToBeMatched: []string{"Executing build command", "Executing run command"}, + } + // odo watch and validate + utils.OdoWatch(utils.OdoV1Watch{}, odoV2Watch, namespace, context, watchFlag, cliRunner, "kube") }) }) }) diff --git a/tests/integration/devfile/debug/cmd_devfile_debug_test.go b/tests/integration/devfile/debug/cmd_devfile_debug_test.go new file mode 100644 index 00000000000..58776a2a949 --- /dev/null +++ b/tests/integration/devfile/debug/cmd_devfile_debug_test.go @@ -0,0 +1,119 @@ +package debug + +import ( + "github.com/openshift/odo/pkg/envinfo" + "github.com/openshift/odo/pkg/testingutil" + 
"github.com/openshift/odo/tests/helper" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// since during parallel runs of cmd devfile debug, the port might be occupied by the other tests +// we execute these tests serially +var _ = Describe("odo devfile debug command serial tests", func() { + + var context string + + var namespace, componentName, projectDirPath, originalKubeconfig string + var projectDir = "/projectDir" + + // current directory and project (before eny test is run) so it can restored after all testing is done + var originalDir string + + // Using program command according to cliRunner in devfile + cliRunner := helper.GetCliRunner() + + // Setup up state for each test spec + // create new project (not set as active) and new context directory for each test spec + // This is before every spec (It) + BeforeEach(func() { + SetDefaultEventuallyTimeout(10 * time.Minute) + context = helper.CreateNewContext() + os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) + + // Devfile push requires experimental mode to be set + helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + + originalKubeconfig = os.Getenv("KUBECONFIG") + helper.LocalKubeconfigSet(context) + namespace = cliRunner.CreateRandNamespaceProject() + componentName = helper.RandString(6) + helper.Chdir(context) + originalDir = helper.Getwd() + projectDirPath = context + projectDir + }) + + // Clean up after the test + // This is run after every Spec (It) + AfterEach(func() { + helper.Chdir(originalDir) + cliRunner.DeleteNamespaceProject(namespace) + err := os.Setenv("KUBECONFIG", originalKubeconfig) + Expect(err).NotTo(HaveOccurred()) + helper.DeleteDir(context) + os.Unsetenv("GLOBALODOCONFIG") + }) + + It("should auto-select a local debug port when the given local port is occupied for a devfile component", func() { + // Devfile push requires experimental mode to be set + 
helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true", "-f") + + helper.CmdShouldPass("git", "clone", "https://github.com/che-samples/web-nodejs-sample.git", projectDirPath) + helper.Chdir(projectDirPath) + + helper.CmdShouldPass("odo", "create", "nodejs", "--project", namespace, componentName) + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), projectDirPath) + helper.RenameFile("devfile-with-debugrun.yaml", "devfile.yaml") + helper.CmdShouldPass("odo", "push", "--debug") + + stopChannel := make(chan bool) + go func() { + helper.CmdShouldRunAndTerminate(60*time.Second, stopChannel, "odo", "debug", "port-forward") + }() + + stopListenerChan := make(chan bool) + startListenerChan := make(chan bool) + listenerStarted := false + go func() { + defer GinkgoRecover() + err := testingutil.FakePortListener(startListenerChan, stopListenerChan, envinfo.DefaultDebugPort) + if err != nil { + close(startListenerChan) + Expect(err).Should(BeNil()) + } + }() + // wait for the test server to start listening + if <-startListenerChan { + listenerStarted = true + } + + freePort := "" + helper.WaitForCmdOut("odo", []string{"debug", "info"}, 1, false, func(output string) bool { + if strings.Contains(output, "Debug is running") { + splits := strings.SplitN(output, ":", 2) + Expect(len(splits)).To(Equal(2)) + freePort = strings.TrimSpace(splits[1]) + _, err := strconv.Atoi(freePort) + Expect(err).NotTo(HaveOccurred()) + return true + } + return false + }) + + // 400 response expected because the endpoint expects a websocket request and we are doing a HTTP GET + // We are just using this to validate if nodejs agent is listening on the other side + helper.HttpWaitForWithStatus("http://localhost:"+freePort, "WebSockets request was expected", 12, 5, 400) + stopChannel <- true + if listenerStarted == true { + stopListenerChan <- true + } else { + close(stopListenerChan) + } + }) +}) diff --git a/tests/integration/devfile/debug/debug_suite_test.go 
b/tests/integration/devfile/debug/debug_suite_test.go new file mode 100644 index 00000000000..c6dbe775a75 --- /dev/null +++ b/tests/integration/devfile/debug/debug_suite_test.go @@ -0,0 +1,15 @@ +package debug + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestDebug(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Debug Suite") + // Keep CustomReporters commented till https://github.com/onsi/ginkgo/issues/628 is fixed + // RunSpecsWithDefaultAndCustomReporters(t, "Project Suite", []Reporter{reporter.JunitReport(t, "../../../reports")}) +} diff --git a/tests/integration/devfile/docker/cmd_docker_devfile_catalog_test.go b/tests/integration/devfile/docker/cmd_docker_devfile_catalog_test.go index deaca5db10b..bc276aede15 100644 --- a/tests/integration/devfile/docker/cmd_docker_devfile_catalog_test.go +++ b/tests/integration/devfile/docker/cmd_docker_devfile_catalog_test.go @@ -36,7 +36,7 @@ var _ = Describe("odo docker devfile catalog command tests", func() { Context("When executing catalog list components on Docker", func() { It("should list all supported devfile components", func() { output := helper.CmdShouldPass("odo", "catalog", "list", "components") - helper.MatchAllInOutput(output, []string{"Odo Devfile Components", "java-spring-boot", "openLiberty"}) + helper.MatchAllInOutput(output, []string{"Odo Devfile Components", "java-spring-boot", "java-openliberty"}) }) }) @@ -46,4 +46,35 @@ var _ = Describe("odo docker devfile catalog command tests", func() { helper.MatchAllInOutput(output, []string{"Odo Devfile Components", "java-spring-boot", "java-maven", "php-mysql"}) }) }) + + Context("When executing catalog list components with -o json flag", func() { + It("should list devfile components in json format", func() { + output := helper.CmdShouldPass("odo", "catalog", "list", "components", "-o", "json") + wantOutput := []string{ + "odo.dev/v1alpha1", + "openLiberty", + "java-spring-boot", + "nodejs", + 
"quarkus", + "php-mysql", + "maven", + "golang", + "java-maven", + } + helper.MatchAllInOutput(output, wantOutput) + }) + }) + + Context("When executing catalog list components with registry that is not set up properly", func() { + It("should list components from valid registry", func() { + helper.CmdShouldPass("odo", "registry", "add", "fake", "http://fake") + output := helper.CmdShouldPass("odo", "catalog", "list", "components") + helper.MatchAllInOutput(output, []string{ + "Odo Devfile Components", + "java-spring-boot", + "quarkus", + }) + helper.CmdShouldPass("odo", "registry", "delete", "fake", "-f") + }) + }) }) diff --git a/tests/integration/devfile/docker/cmd_docker_devfile_delete_test.go b/tests/integration/devfile/docker/cmd_docker_devfile_delete_test.go index 5a6589f0245..7c9a966ee12 100644 --- a/tests/integration/devfile/docker/cmd_docker_devfile_delete_test.go +++ b/tests/integration/devfile/docker/cmd_docker_devfile_delete_test.go @@ -83,13 +83,13 @@ var _ = Describe("odo docker devfile delete command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") Expect(dockerClient.GetRunningContainersByLabel("component=" + cmpName)).To(HaveLen(1)) Expect(dockerClient.GetSourceAndStorageVolumesByComponent(cmpName)).To(HaveLen(1)) - helper.CmdShouldPass("odo", "delete", "--devfile", "devfile.yaml", "-f") + helper.CmdShouldPass("odo", "delete", "-f") Expect(dockerClient.GetRunningContainersByLabel("component=" + cmpName)).To(HaveLen(0)) @@ -105,7 +105,7 @@ var _ = Describe("odo docker devfile delete command tests", func() { helper.RenameFile("devfile.yaml", "devfile-old.yaml") helper.RenameFile("devfile-with-volumes.yaml", "devfile.yaml") - output := helper.CmdShouldPass("odo", 
"push", "--devfile", "devfile.yaml") + output := helper.CmdShouldPass("odo", "push") Expect(output).To(ContainSubstring("Changes successfully pushed to component")) // Retrieve the volume from one of the aliases in the devfile @@ -119,7 +119,7 @@ var _ = Describe("odo docker devfile delete command tests", func() { Expect(dockerClient.GetSourceAndStorageVolumesByComponent(cmpName)).To(HaveLen(3)) - helper.CmdShouldPass("odo", "delete", "--devfile", "devfile.yaml", "-f") + helper.CmdShouldPass("odo", "delete", "-f") Expect(dockerClient.GetRunningContainersByLabel("component=" + cmpName)).To(HaveLen(0)) @@ -137,13 +137,13 @@ var _ = Describe("odo docker devfile delete command tests", func() { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") Expect(dockerClient.GetRunningContainersByLabel("component=" + cmpName)).To(HaveLen(1)) Expect(dockerClient.GetSourceAndStorageVolumesByComponent(cmpName)).To(HaveLen(1)) - helper.CmdShouldPass("odo", "delete", "--devfile", "devfile.yaml", "-f", "--all") + helper.CmdShouldPass("odo", "delete", "-f", "--all") Expect(dockerClient.GetRunningContainersByLabel("component=" + cmpName)).To(HaveLen(0)) diff --git a/tests/integration/devfile/docker/cmd_docker_devfile_push_test.go b/tests/integration/devfile/docker/cmd_docker_devfile_push_test.go index de4c43f3e48..f41e518a82a 100644 --- a/tests/integration/devfile/docker/cmd_docker_devfile_push_test.go +++ b/tests/integration/devfile/docker/cmd_docker_devfile_push_test.go @@ -39,6 +39,8 @@ var _ = Describe("odo docker devfile push command tests", func() { label := "component=" + cmpName dockerClient.StopContainers(label) + dockerClient.RemoveVolumesByComponent(cmpName) + helper.Chdir(currentWorkingDirectory) 
helper.DeleteDir(context) os.Unsetenv("GLOBALODOCONFIG") @@ -50,24 +52,24 @@ var _ = Describe("odo docker devfile push command tests", func() { helper.CmdShouldPass("odo", "create", "nodejs", "--context", context, cmpName) helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), context) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + output := helper.CmdShouldPass("odo", "push") Expect(output).To(ContainSubstring("Changes successfully pushed to component")) // update devfile and push again helper.ReplaceString("devfile.yaml", "name: FOO", "name: BAR") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") }) It("Check that odo push works with a devfile that has multiple containers", func() { // Springboot devfile references multiple containers helper.CmdShouldPass("odo", "create", "java-spring-boot", "--context", context, cmpName) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + output := helper.CmdShouldPass("odo", "push") Expect(output).To(ContainSubstring("Changes successfully pushed to component")) // update devfile and push again helper.ReplaceString("devfile.yaml", "name: FOO", "name: BAR") - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") }) It("Check that odo push works with a devfile that has volumes defined", func() { @@ -77,13 +79,13 @@ var _ = Describe("odo docker devfile push command tests", func() { helper.RenameFile("devfile.yaml", "devfile-old.yaml") helper.RenameFile("devfile-with-volumes.yaml", "devfile.yaml") - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + output := helper.CmdShouldPass("odo", "push") Expect(output).To(ContainSubstring("Changes successfully pushed to component")) // Verify the volumes got created successfully (and 3 volumes exist: one source and two defined in devfile) label := "component=" + cmpName volumes := 
dockerClient.GetVolumesByLabel(label) - Expect(len(volumes)).To(Equal(3)) + Expect(len(volumes)).To(Equal(4)) }) It("Check that odo push mounts the docker volumes in the container", func() { @@ -93,7 +95,7 @@ var _ = Describe("odo docker devfile push command tests", func() { helper.RenameFile("devfile.yaml", "devfile-old.yaml") helper.RenameFile("devfile-with-volumes.yaml", "devfile.yaml") - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + output := helper.CmdShouldPass("odo", "push") Expect(output).To(ContainSubstring("Changes successfully pushed to component")) // Retrieve the volume from one of the aliases in the devfile @@ -106,6 +108,21 @@ var _ = Describe("odo docker devfile push command tests", func() { Expect(volMounted).To(Equal(true)) }) + It("checks that odo push with -o json displays machine readable JSON event output", func() { + + helper.CmdShouldPass("odo", "create", "nodejs", "--context", context, cmpName) + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs"), context) + + output := helper.CmdShouldPass("odo", "push", "-o", "json") + utils.AnalyzePushConsoleOutput(output) + + // update devfile and push again + helper.ReplaceString("devfile.yaml", "name: FOO", "name: BAR") + output = helper.CmdShouldPass("odo", "push", "-o", "json") + utils.AnalyzePushConsoleOutput(output) + + }) + It("should not build when no changes are detected in the directory and build when a file change is detected", func() { utils.ExecPushToTestFileChanges(context, cmpName, "") }) @@ -120,18 +137,16 @@ var _ = Describe("odo docker devfile push command tests", func() { Expect(len(containers)).To(Equal(1)) stdOut := dockerClient.ExecContainer(containers[0], "ls -la "+sourcePath) - Expect(stdOut).To(ContainSubstring(("foobar.txt"))) - Expect(stdOut).To(ContainSubstring(("testdir"))) + helper.MatchAllInOutput(stdOut, []string{"foobar.txt", "testdir"}) // Now we delete the file and dir and push helper.DeleteDir(newFilePath) 
helper.DeleteDir(newDirPath) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") // Then check to see if it's truly been deleted stdOut = dockerClient.ExecContainer(containers[0], "ls -la "+sourcePath) - Expect(stdOut).To(Not(ContainSubstring(("foobar.txt")))) - Expect(stdOut).To(Not(ContainSubstring(("testdir")))) + helper.DontMatchAllInOutput(stdOut, []string{"foobar.txt", "testdir"}) }) It("should build when no changes are detected in the directory and force flag is enabled", func() { @@ -153,12 +168,14 @@ var _ = Describe("odo docker devfile push command tests", func() { helper.CmdShouldPass("odo", "create", "java-spring-boot", cmpName) helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile-init.yaml"), filepath.Join(context, "devfile.yaml")) + helper.CopyExampleDevFile(filepath.Join("source", "devfilesV1", "springboot", "devfile-init.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") - Expect(output).To(ContainSubstring("Executing devinit command \"echo hello")) - Expect(output).To(ContainSubstring("Executing devbuild command \"/artifacts/bin/build-container-full.sh\"")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + output := helper.CmdShouldPass("odo", "push") + helper.MatchAllInOutput(output, []string{ + "Executing devinit command \"echo hello", + "Executing devbuild command \"/artifacts/bin/build-container-full.sh\"", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) // Check to see if it's been pushed (foobar.txt abd directory testdir) containers := dockerClient.GetRunningContainersByCompAlias(cmpName, "runtime") @@ -172,11 +189,13 @@ var _ = Describe("odo docker devfile push command tests", func() { helper.CmdShouldPass("odo", 
"create", "java-spring-boot", cmpName) helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile-init-without-build.yaml"), filepath.Join(context, "devfile.yaml")) + helper.CopyExampleDevFile(filepath.Join("source", "devfilesV1", "springboot", "devfile-init-without-build.yaml"), filepath.Join(context, "devfile.yaml")) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") - Expect(output).To(ContainSubstring("Executing devinit command \"echo hello")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + output := helper.CmdShouldPass("odo", "push") + helper.MatchAllInOutput(output, []string{ + "Executing devinit command \"echo hello", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) // Check to see if it's been pushed (foobar.txt abd directory testdir) containers := dockerClient.GetRunningContainersByCompAlias(cmpName, "runtime") diff --git a/tests/integration/devfile/docker/cmd_docker_devfile_url_pushtarget_test.go b/tests/integration/devfile/docker/cmd_docker_devfile_url_pushtarget_test.go new file mode 100644 index 00000000000..929ba6d1bed --- /dev/null +++ b/tests/integration/devfile/docker/cmd_docker_devfile_url_pushtarget_test.go @@ -0,0 +1,82 @@ +package docker + +import ( + "os" + "path/filepath" + "time" + + "github.com/openshift/odo/tests/helper" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("odo docker devfile url pushtarget command tests", func() { + var context, currentWorkingDirectory, cmpName string + dockerClient := helper.NewDockerRunner("docker") + + // This is run after every Spec (It) + var _ = BeforeEach(func() { + SetDefaultEventuallyTimeout(10 * time.Minute) + context = helper.CreateNewContext() + currentWorkingDirectory = helper.Getwd() + cmpName = helper.RandString(6) + helper.Chdir(context) + os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) + helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "docker") + }) + + // Clean up after the test + // This is run after every Spec (It) + var _ = AfterEach(func() { + // Stop all containers labeled with the component name + label := "component=" + cmpName + dockerClient.StopContainers(label) + + helper.Chdir(currentWorkingDirectory) + helper.DeleteDir(context) + os.Unsetenv("GLOBALODOCONFIG") + }) + + // These tests require an active kube context *and* Docker daemon, so keeping them separate + // from the other Docker URL tests which only require Docker. 
+ Context("Switching pushtarget", func() { + It("switch from docker to kube, odo push should display warning", func() { + var stdout string + + helper.CmdShouldPass("odo", "create", "nodejs", cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + helper.CmdShouldPass("odo", "url", "create") + + helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "kube", "-f") + session := helper.CmdRunner("odo", "push") + stdout = string(session.Wait().Out.Contents()) + stderr := string(session.Wait().Err.Contents()) + Expect(stderr).To(ContainSubstring("Found a URL defined for Docker, but no valid URLs for Kubernetes.")) + Expect(stdout).To(ContainSubstring("Changes successfully pushed to component")) + }) + + It("switch from kube to docker, odo push should display warning", func() { + var stdout string + helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "kube", "-f") + helper.CmdShouldPass("odo", "create", "nodejs", cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + helper.CmdShouldPass("odo", "url", "create", "--host", "1.2.3.4.com", "--ingress") + + helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "docker", "-f") + session := helper.CmdRunner("odo", "push") + stdout = string(session.Wait().Out.Contents()) + stderr := string(session.Wait().Err.Contents()) + Expect(stderr).To(ContainSubstring("Found a URL defined for Kubernetes, but no valid URLs for Docker.")) + Expect(stdout).To(ContainSubstring("Changes successfully pushed to component")) + }) + }) + +}) diff --git a/tests/integration/devfile/docker/cmd_docker_devfile_url_test.go 
b/tests/integration/devfile/docker/cmd_docker_devfile_url_test.go index e3e7ec63255..5088d07d0b9 100644 --- a/tests/integration/devfile/docker/cmd_docker_devfile_url_test.go +++ b/tests/integration/devfile/docker/cmd_docker_devfile_url_test.go @@ -53,7 +53,7 @@ var _ = Describe("odo docker devfile url command tests", func() { stdout = helper.CmdShouldPass("odo", "url", "create") helper.MatchAllInOutput(stdout, []string{cmpName + "-3000", "created for component"}) - stdout = helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + stdout = helper.CmdShouldPass("odo", "push") Expect(stdout).To(ContainSubstring("Changes successfully pushed to component")) }) @@ -94,7 +94,7 @@ var _ = Describe("odo docker devfile url command tests", func() { helper.CmdShouldPass("odo", "url", "create", cmpName) - output := helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + output := helper.CmdShouldPass("odo", "push") helper.MatchAllInOutput(output, []string{"Executing devbuild command", "Executing devrun command"}) url := strings.TrimSpace(helper.ExtractSubString(output, "127.0.0.1", "created")) @@ -103,44 +103,6 @@ var _ = Describe("odo docker devfile url command tests", func() { }) }) - Context("Switching pushtarget", func() { - It("switch from docker to kube, odo push should display warning", func() { - var stdout string - - helper.CmdShouldPass("odo", "create", "nodejs", cmpName) - - helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - - helper.CmdShouldPass("odo", "url", "create") - - helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "kube", "-f") - session := helper.CmdRunner("odo", "push", "--devfile", "devfile.yaml") - stdout = string(session.Wait().Out.Contents()) - stderr := string(session.Wait().Err.Contents()) - Expect(stderr).To(ContainSubstring("Found a URL defined for 
Docker, but no valid URLs for Kubernetes.")) - Expect(stdout).To(ContainSubstring("Changes successfully pushed to component")) - }) - - It("switch from kube to docker, odo push should display warning", func() { - var stdout string - helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "kube", "-f") - helper.CmdShouldPass("odo", "create", "nodejs", cmpName) - - helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) - helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) - - helper.CmdShouldPass("odo", "url", "create", "--host", "1.2.3.4.com", "--ingress") - - helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "docker", "-f") - session := helper.CmdRunner("odo", "push", "--devfile", "devfile.yaml") - stdout = string(session.Wait().Out.Contents()) - stderr := string(session.Wait().Err.Contents()) - Expect(stderr).To(ContainSubstring("Found a URL defined for Kubernetes, but no valid URLs for Docker.")) - Expect(stdout).To(ContainSubstring("Changes successfully pushed to component")) - }) - }) - Context("Listing urls", func() { It("should list url with appropriate state", func() { var stdout string @@ -157,7 +119,7 @@ var _ = Describe("odo docker devfile url command tests", func() { stdout = helper.CmdShouldPass("odo", "url", "list") helper.MatchAllInOutput(stdout, []string{url1, "Not Pushed"}) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") stdout = helper.CmdShouldPass("odo", "url", "list") helper.MatchAllInOutput(stdout, []string{url1, "Pushed"}) helper.CmdShouldPass("odo", "url", "delete", url1, "-f") @@ -194,7 +156,7 @@ var _ = Describe("odo docker devfile url command tests", func() { }) Context("Describing urls", func() { - It("should describe URL with appropriate appropriate", func() { + It("should describe URL with appropriate state", func() { var stdout string url1 := 
helper.RandString(5) helper.CmdShouldPass("odo", "create", "nodejs", cmpName) @@ -207,7 +169,7 @@ var _ = Describe("odo docker devfile url command tests", func() { stdout = helper.CmdShouldPass("odo", "url", "describe", url1) helper.MatchAllInOutput(stdout, []string{url1, "Not Pushed"}) - helper.CmdShouldPass("odo", "push", "--devfile", "devfile.yaml") + helper.CmdShouldPass("odo", "push") stdout = helper.CmdShouldPass("odo", "url", "describe", url1) helper.MatchAllInOutput(stdout, []string{url1, "Pushed"}) @@ -240,5 +202,4 @@ var _ = Describe("odo docker devfile url command tests", func() { Expect(desiredURLListJSON).Should(MatchJSON(stdout)) }) }) - }) diff --git a/tests/integration/devfile/docker/cmd_docker_devfile_watch_test.go b/tests/integration/devfile/docker/cmd_docker_devfile_watch_test.go new file mode 100644 index 00000000000..e03cc5269fe --- /dev/null +++ b/tests/integration/devfile/docker/cmd_docker_devfile_watch_test.go @@ -0,0 +1,86 @@ +package docker + +import ( + "os" + "path/filepath" + "time" + + "github.com/openshift/odo/tests/helper" + "github.com/openshift/odo/tests/integration/devfile/utils" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("odo docker devfile watch command tests", func() { + var context, currentWorkingDirectory, cmpName string + + dockerClient := helper.NewDockerRunner("docker") + + // This is run after every Spec (It) + var _ = BeforeEach(func() { + SetDefaultEventuallyTimeout(10 * time.Minute) + context = helper.CreateNewContext() + currentWorkingDirectory = helper.Getwd() + cmpName = helper.RandString(6) + helper.Chdir(context) + os.Setenv("GLOBALODOCONFIG", filepath.Join(context, "config.yaml")) + + // Local devfile push requires experimental mode to be set and the pushtarget set to docker + helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") + helper.CmdShouldPass("odo", "preference", "set", "pushtarget", "docker") + }) + + // Clean up after the test + // This is run after every Spec (It) + var _ = AfterEach(func() { + // Stop all containers labeled with the component name + label := "component=" + cmpName + dockerClient.StopContainers(label) + + helper.Chdir(currentWorkingDirectory) + helper.DeleteDir(context) + os.Unsetenv("GLOBALODOCONFIG") + }) + + Context("when executing odo watch after odo push", func() { + It("should listen for file changes", func() { + helper.CmdShouldPass("odo", "create", "nodejs", cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + output := helper.CmdShouldPass("odo", "push") + Expect(output).To(ContainSubstring("Changes successfully pushed to component")) + + watchFlag := "" + odoV2Watch := utils.OdoV2Watch{ + CmpName: cmpName, + StringsToBeMatched: []string{"Executing devbuild command", "Executing devrun command"}, + } + // odo watch and validate + utils.OdoWatch(utils.OdoV1Watch{}, odoV2Watch, "", context, watchFlag, dockerClient, "docker") + }) + }) + + Context("when executing odo watch after odo push 
with custom commands", func() { + It("should listen for file changes", func() { + helper.CmdShouldPass("odo", "create", "nodejs", cmpName) + + helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context) + helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context, "devfile.yaml")) + + output := helper.CmdShouldPass("odo", "push", "--build-command", "build", "--run-command", "run") + Expect(output).To(ContainSubstring("Changes successfully pushed to component")) + + watchFlag := "--build-command build --run-command run" + odoV2Watch := utils.OdoV2Watch{ + CmpName: cmpName, + StringsToBeMatched: []string{"Executing build command", "Executing run command"}, + } + // odo watch and validate + utils.OdoWatch(utils.OdoV1Watch{}, odoV2Watch, "", context, watchFlag, dockerClient, "docker") + }) + }) + +}) diff --git a/tests/integration/devfile/utils/cmd_devfile_push_test_utils.go b/tests/integration/devfile/utils/cmd_devfile_push_test_utils.go new file mode 100644 index 00000000000..74b8061cf1d --- /dev/null +++ b/tests/integration/devfile/utils/cmd_devfile_push_test_utils.go @@ -0,0 +1,130 @@ +package utils + +import ( + "encoding/json" + "strings" + + "github.com/openshift/odo/pkg/machineoutput" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// AnalyzePushConsoleOutput analyzes the output of 'odo push -o json' for the machine readable event push test above. 
+func AnalyzePushConsoleOutput(pushConsoleOutput string) { + + lines := strings.Split(strings.Replace(pushConsoleOutput, "\r\n", "\n", -1), "\n") + + var entries []machineoutput.MachineEventLogEntry + + // Ensure that all lines can be correctly parsed into their expected JSON structure + for _, line := range lines { + + if len(strings.TrimSpace(line)) == 0 { + continue + } + + // fmt.Println("Processing output line: " + line) + + lineWrapper := machineoutput.MachineEventWrapper{} + + err := json.Unmarshal([]byte(line), &lineWrapper) + Expect(err).NotTo(HaveOccurred()) + + entry, err := lineWrapper.GetEntry() + Expect(err).NotTo(HaveOccurred()) + + entries = append(entries, entry) + + } + + // Ensure we pass a sanity test on the minimum expected entries + if len(entries) < 4 { + Fail("Expected at least 4 entries, corresponding to command/action execution.") + } + + // Ensure that all logText entries are wrapped inside commandExecutionBegin and commandExecutionComplete entries (e.g. no floating logTexts) + insideCommandExecution := false + for _, entry := range entries { + + if entry.GetType() == machineoutput.TypeDevFileCommandExecutionBegin { + insideCommandExecution = true + } + + if entry.GetType() == machineoutput.TypeDevFileCommandExecutionComplete { + insideCommandExecution = false + } + + if entry.GetType() == machineoutput.TypeLogText { + Expect(insideCommandExecution).To(Equal(true)) + } + + } + + // Ensure that the log output has the given structure: + // - look for the expected devbuild events, then look for the expected devrun events. 
+ expectedEventOrder := []struct { + entryType machineoutput.MachineEventLogEntryType + commandName string + }{ + // first the devbuild command (and its action) should run + { + machineoutput.TypeDevFileCommandExecutionBegin, + "devbuild", + }, + { + // at least one logged line of text + machineoutput.TypeLogText, + "", + }, + { + machineoutput.TypeDevFileCommandExecutionComplete, + "devbuild", + }, + // next the devbuild command (and its action) should run + { + machineoutput.TypeDevFileCommandExecutionBegin, + "devrun", + }, + { + // at least one logged line of text + machineoutput.TypeLogText, + "", + }, + { + machineoutput.TypeDevFileCommandExecutionComplete, + "devrun", + }, + } + currIndex := -1 + for _, nextEventOrder := range expectedEventOrder { + entry, newIndex := findNextEntryByType(currIndex, nextEventOrder.entryType, entries) + Expect(entry).NotTo(BeNil()) + Expect(newIndex).To(BeNumerically(">=", 0)) + Expect(newIndex).To(BeNumerically(">", currIndex)) // monotonically increasing index + + // We should see devbuild for the first set of events, then devrun + commandName := machineoutput.GetCommandName(entry) + Expect(commandName).To(Equal(nextEventOrder.commandName)) + + currIndex = newIndex + } + +} + +// findNextEntryByType locates the next entry of a given type within a slice. Currently used for test purposes only. 
+func findNextEntryByType(initialIndex int, typeToFind machineoutput.MachineEventLogEntryType, entries []machineoutput.MachineEventLogEntry) (machineoutput.MachineEventLogEntry, int) { + + for index, entry := range entries { + if index < initialIndex { + continue + } + + if entry.GetType() == typeToFind { + return entry, index + } + } + + return nil, -1 + +} diff --git a/tests/integration/devfile/utils/utils.go b/tests/integration/devfile/utils/utils.go index 8aa0aff94b6..20f4a312c63 100644 --- a/tests/integration/devfile/utils/utils.go +++ b/tests/integration/devfile/utils/utils.go @@ -2,7 +2,11 @@ package utils import ( "fmt" + "os" "path/filepath" + "reflect" + "strings" + "time" "github.com/openshift/odo/tests/helper" @@ -26,11 +30,13 @@ func ExecDefaultDevfileCommands(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldPass("odo", args...) 
- Expect(output).To(ContainSubstring("Executing devbuild command \"/artifacts/bin/build-container-full.sh\"")) - Expect(output).To(ContainSubstring("Executing devrun command \"/artifacts/bin/start-server.sh\"")) + helper.MatchAllInOutput(output, []string{ + "Executing devbuild command \"/artifacts/bin/build-container-full.sh\"", + "Executing devrun command \"/artifacts/bin/start-server.sh\"", + }) } // ExecWithMissingBuildCommand executes odo push with a missing build command @@ -42,7 +48,7 @@ func ExecWithMissingBuildCommand(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-without-devbuild.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldPass("odo", args...) Expect(output).NotTo(ContainSubstring("Executing devbuild command")) @@ -58,14 +64,14 @@ func ExecWithMissingRunCommand(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - // Rename the devrun command - helper.ReplaceString(filepath.Join(projectDirPath, "devfile.yaml"), "devrun", "randomcommand") + // Remove the run commands + helper.ReplaceString(filepath.Join(projectDirPath, "devfile.yaml"), "kind: run", "kind: debug") - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldFail("odo", args...) 
Expect(output).NotTo(ContainSubstring("Executing devrun command")) - Expect(output).To(ContainSubstring("The command \"devrun\" was not found in the devfile")) + Expect(output).To(ContainSubstring("the command type \"run\" is not found in the devfile")) } // ExecWithCustomCommand executes odo push with a custom command @@ -77,11 +83,13 @@ func ExecWithCustomCommand(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml", "--build-command", "build", "--run-command", "run"} + args = []string{"push", "--build-command", "build", "--run-command", "run"} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldPass("odo", args...) - Expect(output).To(ContainSubstring("Executing build command \"npm install\"")) - Expect(output).To(ContainSubstring("Executing run command \"nodemon app.js\"")) + helper.MatchAllInOutput(output, []string{ + "Executing build command \"npm install\"", + "Executing run command \"nodemon app.js\"", + }) } // ExecWithWrongCustomCommand executes odo push with a wrong custom command @@ -95,11 +103,11 @@ func ExecWithWrongCustomCommand(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml", "--build-command", garbageCommand} + args = []string{"push", "--build-command", garbageCommand} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldFail("odo", args...) 
Expect(output).NotTo(ContainSubstring("Executing buildgarbage command")) - Expect(output).To(ContainSubstring("The command \"%v\" was not found in the devfile", garbageCommand)) + Expect(output).To(ContainSubstring("the command \"%v\" is not found in the devfile", garbageCommand)) } // ExecPushToTestFileChanges executes odo push with and without a file change @@ -111,7 +119,7 @@ func ExecPushToTestFileChanges(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) helper.CmdShouldPass("odo", args...) @@ -132,12 +140,12 @@ func ExecPushWithForceFlag(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) helper.CmdShouldPass("odo", args...) // use the force build flag and push - args = []string{"push", "--devfile", "devfile.yaml", "-f"} + args = []string{"push", "-f"} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldPass("odo", args...) Expect(output).To(Not(ContainSubstring("No file changes detected, skipping build"))) @@ -161,7 +169,7 @@ func ExecPushWithNewFileAndDir(projectDirPath, cmpName, namespace, newFilePath, helper.MakeDir(newDirPath) // Push - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) helper.CmdShouldPass("odo", args...) 
} @@ -175,14 +183,147 @@ func ExecWithRestartAttribute(projectDirPath, cmpName, namespace string) { helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), projectDirPath) helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-restart.yaml"), filepath.Join(projectDirPath, "devfile.yaml")) - args = []string{"push", "--devfile", "devfile.yaml"} + args = []string{"push"} args = useProjectIfAvailable(args, namespace) output := helper.CmdShouldPass("odo", args...) Expect(output).To(ContainSubstring("Executing devrun command \"nodemon app.js\"")) - args = []string{"push", "-f", "--devfile", "devfile.yaml"} + args = []string{"push", "-f"} args = useProjectIfAvailable(args, namespace) output = helper.CmdShouldPass("odo", args...) Expect(output).To(ContainSubstring("if not running")) } + +type OdoV1Watch struct { + SrcType string + RouteURL string + AppName string +} + +type OdoV2Watch struct { + CmpName string + StringsToBeMatched []string +} + +// OdoWatch creates files, dir in the context and watches for the changes to be pushed +// Specify OdoV1Watch for odo version 1, OdoV2Watch for odo version 2(devfile) +// platform is either kube or docker +func OdoWatch(odoV1Watch OdoV1Watch, odoV2Watch OdoV2Watch, project, context, flag string, runner interface{}, platform string) { + + isDevfileTest := false + + // if the odoV2Watch object is not empty, its a devfile test + if !reflect.DeepEqual(odoV2Watch, OdoV2Watch{}) { + isDevfileTest = true + } + + startSimulationCh := make(chan bool) + go func() { + startMsg := <-startSimulationCh + if startMsg { + err := os.MkdirAll(filepath.Join(context, ".abc"), 0750) + Expect(err).To(BeNil()) + + err = os.MkdirAll(filepath.Join(context, "abcd"), 0750) + Expect(err).To(BeNil()) + + _, err = os.Create(filepath.Join(context, "a.txt")) + Expect(err).To(BeNil()) + + helper.DeleteDir(filepath.Join(context, "abcd")) + + if isDevfileTest { + helper.ReplaceString(filepath.Join(context, "app", 
"app.js"), "Hello", "Hello odo") + } else { + if odoV1Watch.SrcType == "openjdk" { + helper.ReplaceString(filepath.Join(context, "src", "main", "java", "MessageProducer.java"), "Hello", "Hello odo") + } else { + helper.ReplaceString(filepath.Join(context, "server.js"), "Hello", "Hello odo") + } + } + } + }() + + if !isDevfileTest { + flag = strings.TrimSpace(fmt.Sprintf("%s-app -v 4 %s", odoV1Watch.SrcType, flag)) + } + + success, err := helper.WatchNonRetCmdStdOut( + ("odo watch " + flag + " --context " + context), + time.Duration(5)*time.Minute, + func(output string) bool { + if isDevfileTest { + stringsMatched := true + + for _, stringToBeMatched := range odoV2Watch.StringsToBeMatched { + if !strings.Contains(output, stringToBeMatched) { + stringsMatched = false + } + } + + if stringsMatched { + // Verify delete from component pod + err := validateContainerExecListDir(odoV1Watch, odoV2Watch, runner, platform, project, isDevfileTest) + Expect(err).To(BeNil()) + return true + } + } else { + curlURL := helper.CmdShouldPass("curl", odoV1Watch.RouteURL) + if strings.Contains(curlURL, "Hello odo") { + // Verify delete from component pod + err := validateContainerExecListDir(odoV1Watch, odoV2Watch, runner, platform, project, isDevfileTest) + Expect(err).To(BeNil()) + return true + } + } + + return false + }, + startSimulationCh, + func(output string) bool { + return strings.Contains(output, "Waiting for something to change") + }) + + Expect(success).To(Equal(true)) + Expect(err).To(BeNil()) + + if !isDevfileTest { + // Verify memory limits to be same as configured + getMemoryLimit := runner.(helper.OcRunner).MaxMemory(odoV1Watch.SrcType+"-app", odoV1Watch.AppName, project) + Expect(getMemoryLimit).To(ContainSubstring("700Mi")) + getMemoryRequest := runner.(helper.OcRunner).MinMemory(odoV1Watch.SrcType+"-app", odoV1Watch.AppName, project) + Expect(getMemoryRequest).To(ContainSubstring("400Mi")) + } +} + +func validateContainerExecListDir(odoV1Watch OdoV1Watch, 
odoV2Watch OdoV2Watch, runner interface{}, platform, project string, isDevfileTest bool) error { + var stdOut string + + switch platform { + case "kube": + if isDevfileTest { + cliRunner := runner.(helper.CliRunner) + podName := cliRunner.GetRunningPodNameByComponent(odoV2Watch.CmpName, project) + stdOut = cliRunner.ExecListDir(podName, project, "/projects/nodejs-web-app") + } else { + ocRunner := runner.(helper.OcRunner) + podName := ocRunner.GetRunningPodNameOfComp(odoV1Watch.SrcType+"-app", project) + envs := ocRunner.GetEnvs(odoV1Watch.SrcType+"-app", odoV1Watch.AppName, project) + dir := envs["ODO_S2I_SRC_BIN_PATH"] + stdOut = ocRunner.ExecListDir(podName, project, filepath.Join(dir, "src")) + } + case "docker": + dockerRunner := runner.(helper.DockerRunner) + containers := dockerRunner.GetRunningContainersByCompAlias(odoV2Watch.CmpName, "runtime") + Expect(len(containers)).To(Equal(1)) + stdOut = dockerRunner.ExecContainer(containers[0], "ls -la /projects/nodejs-web-app") + default: + return fmt.Errorf("Platform %s is not supported", platform) + } + + helper.MatchAllInOutput(stdOut, []string{"a.txt", ".abc"}) + helper.DontMatchAllInOutput(stdOut, []string{"abcd"}) + + return nil +} diff --git a/tests/integration/generic_test.go b/tests/integration/generic_test.go index ae9220d211c..4d6eebcbd0f 100644 --- a/tests/integration/generic_test.go +++ b/tests/integration/generic_test.go @@ -50,11 +50,7 @@ var _ = Describe("odo generic", func() { Context("When executing catalog list without component directory", func() { It("should list all component catalogs", func() { stdOut := helper.CmdShouldPass("odo", "catalog", "list", "components") - Expect(stdOut).To(ContainSubstring("dotnet")) - Expect(stdOut).To(ContainSubstring("nginx")) - Expect(stdOut).To(ContainSubstring("php")) - Expect(stdOut).To(ContainSubstring("ruby")) - Expect(stdOut).To(ContainSubstring("wildfly")) + helper.MatchAllInOutput(stdOut, []string{"dotnet", "nginx", "php", "ruby", "wildfly"}) }) }) @@ 
-274,11 +270,10 @@ var _ = Describe("odo generic", func() { odoVersionStringMatch := reOdoVersion.MatchString(odoVersion) rekubernetesVersion := regexp.MustCompile(`Kubernetes:\s*v[0-9]+.[0-9]+.[0-9]+((-\w+\.[0-9]+)?\+\w+)?`) kubernetesVersionStringMatch := rekubernetesVersion.MatchString(odoVersion) - reServerURL := regexp.MustCompile(`Server:\s*https:\/\/(.+\.com|([0-9]+.){3}[0-9]+):[0-9]{4}`) - serverURLStringMatch := reServerURL.MatchString(odoVersion) Expect(odoVersionStringMatch).Should(BeTrue()) Expect(kubernetesVersionStringMatch).Should(BeTrue()) - Expect(serverURLStringMatch).Should(BeTrue()) + serverURL := oc.GetCurrentServerURL() + Expect(odoVersion).Should(ContainSubstring("Server: " + serverURL)) }) }) diff --git a/tests/integration/operatorhub/cmd_service_test.go b/tests/integration/operatorhub/cmd_service_test.go index e21a2dd9b55..c1a34a38cfb 100644 --- a/tests/integration/operatorhub/cmd_service_test.go +++ b/tests/integration/operatorhub/cmd_service_test.go @@ -14,42 +14,72 @@ import ( "github.com/openshift/odo/tests/helper" ) -const ( - CI_OPERATOR_HUB_PROJECT = "ci-operator-hub-project" -) - var _ = Describe("odo service command tests for OperatorHub", func() { + var project string + BeforeEach(func() { SetDefaultEventuallyTimeout(10 * time.Minute) SetDefaultConsistentlyDuration(30 * time.Second) - helper.CmdShouldPass("odo", "project", "set", CI_OPERATOR_HUB_PROJECT) // TODO: remove this when OperatorHub integration is fully baked into odo - os.Setenv("ODO_EXPERIMENTAL", "true") + helper.CmdShouldPass("odo", "preference", "set", "Experimental", "true") }) + preSetup := func() { + project = helper.CreateRandProject() + helper.CmdShouldPass("odo", "project", "set", project) + + // wait till oc can see the all operators installed by setup script in the namespace + ocArgs := []string{"get", "csv"} + operators := []string{"etcd", "mongodb"} + for _, operator := range operators { + helper.WaitForCmdOut("oc", ocArgs, 1, true, func(output string) bool 
{ + return strings.Contains(output, operator) + }) + } + } + + cleanPreSetup := func() { + helper.DeleteProject(project) + } + Context("When experimental mode is enabled", func() { + + JustBeforeEach(func() { + preSetup() + }) + + JustAfterEach(func() { + cleanPreSetup() + }) + It("should list operators installed in the namespace", func() { stdOut := helper.CmdShouldPass("odo", "catalog", "list", "services") - Expect(stdOut).To(ContainSubstring("Operators available in the cluster")) - Expect(stdOut).To(ContainSubstring("mongodb-enterprise")) - Expect(stdOut).To(ContainSubstring("etcdoperator")) + helper.MatchAllInOutput(stdOut, []string{"Operators available in the cluster", "mongodb-enterprise", "etcdoperator"}) }) }) Context("When creating an operator backed service", func() { + + JustBeforeEach(func() { + preSetup() + }) + + JustAfterEach(func() { + cleanPreSetup() + }) + It("should be able to create EtcdCluster from its alm example", func() { - // First let's grab the etcd operator's name from "odo catalog list services" output operators := helper.CmdShouldPass("odo", "catalog", "list", "services") - etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]`).FindString(operators) - + etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]-clusterwide`).FindString(operators) helper.CmdShouldPass("odo", "service", "create", etcdOperator, "--crd", "EtcdCluster") - pods := helper.CmdShouldPass("oc", "get", "pods", "-n", CI_OPERATOR_HUB_PROJECT) + // now verify if the pods for the operator have started + pods := helper.CmdShouldPass("oc", "get", "pods", "-n", project) // Look for pod with example name because that's the name etcd will give to the pods. 
etcdPod := regexp.MustCompile(`example-.[a-z0-9]*`).FindString(pods) - ocArgs := []string{"get", "pods", etcdPod, "-o", "template=\"{{.status.phase}}\"", "-n", CI_OPERATOR_HUB_PROJECT} + ocArgs := []string{"get", "pods", etcdPod, "-o", "template=\"{{.status.phase}}\"", "-n", project} helper.WaitForCmdOut("oc", ocArgs, 1, true, func(output string) bool { return strings.Contains(output, "Running") }) @@ -62,37 +92,45 @@ var _ = Describe("odo service command tests for OperatorHub", func() { }) Context("When using dry-run option to create operator backed service", func() { + + JustBeforeEach(func() { + preSetup() + }) + + JustAfterEach(func() { + cleanPreSetup() + }) + It("should only output the definition of the CR that will be used to start service", func() { // First let's grab the etcd operator's name from "odo catalog list services" output operators := helper.CmdShouldPass("odo", "catalog", "list", "services") - etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]`).FindString(operators) + etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]-clusterwide`).FindString(operators) stdOut := helper.CmdShouldPass("odo", "service", "create", etcdOperator, "--crd", "EtcdCluster", "--dry-run") - Expect(stdOut).To(ContainSubstring("apiVersion")) - Expect(stdOut).To(ContainSubstring("kind")) + helper.MatchAllInOutput(stdOut, []string{"apiVersion", "kind"}) }) }) Context("When using from-file option", func() { + + JustBeforeEach(func() { + preSetup() + }) + + JustAfterEach(func() { + cleanPreSetup() + }) + It("should be able to create a service", func() { // First let's grab the etcd operator's name from "odo catalog list services" output operators := helper.CmdShouldPass("odo", "catalog", "list", "services") - etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]`).FindString(operators) + etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]-clusterwide`).FindString(operators) stdOut := 
helper.CmdShouldPass("odo", "service", "create", etcdOperator, "--crd", "EtcdCluster", "--dry-run") - // change the metadata.name from example to example2 so that we can run tests parallely - lines := strings.Split(stdOut, "\n") - for i, line := range lines { - if strings.Contains(line, "name: example") { - lines[i] = strings.Replace(lines[i], "example", "example2", 1) - } - } - stdOut = strings.Join(lines, "\n") - // stdOut contains the yaml specification. Store it to a file randomFileName := helper.RandString(6) + ".yaml" - fileName := filepath.Join("/tmp", randomFileName) + fileName := filepath.Join(os.TempDir(), randomFileName) if err := ioutil.WriteFile(fileName, []byte(stdOut), 0644); err != nil { fmt.Printf("Could not write yaml spec to file %s because of the error %v", fileName, err.Error()) } @@ -101,11 +139,11 @@ var _ = Describe("odo service command tests for OperatorHub", func() { helper.CmdShouldPass("odo", "service", "create", "--from-file", fileName) // now verify if the pods for the operator have started - pods := helper.CmdShouldPass("oc", "get", "pods", "-n", CI_OPERATOR_HUB_PROJECT) + pods := helper.CmdShouldPass("oc", "get", "pods", "-n", project) // Look for pod with example name because that's the name etcd will give to the pods. - etcdPod := regexp.MustCompile(`example2-.[a-z0-9]*`).FindString(pods) + etcdPod := regexp.MustCompile(`example-.[a-z0-9]*`).FindString(pods) - ocArgs := []string{"get", "pods", etcdPod, "-o", "template=\"{{.status.phase}}\"", "-n", CI_OPERATOR_HUB_PROJECT} + ocArgs := []string{"get", "pods", etcdPod, "-o", "template=\"{{.status.phase}}\"", "-n", project} helper.WaitForCmdOut("oc", ocArgs, 1, true, func(output string) bool { return strings.Contains(output, "Running") }) @@ -113,7 +151,18 @@ var _ = Describe("odo service command tests for OperatorHub", func() { // Delete the pods created. This should idealy be done by `odo // service delete` but that's not implemented for operator backed // services yet. 
- helper.CmdShouldPass("oc", "delete", "EtcdCluster", "example2") + helper.CmdShouldPass("oc", "delete", "EtcdCluster", "example") + }) + }) + + Context("When using from-file option", func() { + + JustBeforeEach(func() { + preSetup() + }) + + JustAfterEach(func() { + cleanPreSetup() }) It("should fail to create service if metadata doesn't exist or is invalid", func() { @@ -159,62 +208,63 @@ spec: }) Context("JSON output", func() { + + JustBeforeEach(func() { + preSetup() + }) + + JustAfterEach(func() { + cleanPreSetup() + }) + It("listing catalog of services", func() { jsonOut := helper.CmdShouldPass("odo", "catalog", "list", "services", "-o", "json") - Expect(jsonOut).To(ContainSubstring("mongodb-enterprise")) - Expect(jsonOut).To(ContainSubstring("etcdoperator")) + helper.MatchAllInOutput(jsonOut, []string{"mongodb-enterprise", "etcdoperator"}) }) }) Context("When operator backed services are created", func() { - It("should list the services if they exist", func() { - // First let's grab the etcd operator's name from "odo catalog list services" output - operators := helper.CmdShouldPass("odo", "catalog", "list", "services") - etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]`).FindString(operators) - - stdOut := helper.CmdShouldPass("odo", "service", "create", etcdOperator, "--crd", "EtcdCluster", "--dry-run") - // change the metadata.name from example to example3 so that we can run tests parallely - lines := strings.Split(stdOut, "\n") - for i, line := range lines { - if strings.Contains(line, "name: example") { - lines[i] = strings.Replace(lines[i], "example", "example3", 1) - } - } - stdOut = strings.Join(lines, "\n") + JustBeforeEach(func() { + preSetup() + }) - // stdOut contains the yaml specification. 
Store it to a file - randomFileName := helper.RandString(6) + ".yaml" - fileName := filepath.Join("/tmp", randomFileName) - if err := ioutil.WriteFile(fileName, []byte(stdOut), 0644); err != nil { - fmt.Printf("Could not write yaml spec to file %s because of the error %v", fileName, err.Error()) - } + JustAfterEach(func() { + cleanPreSetup() + }) - // now create operator backed service - helper.CmdShouldPass("odo", "service", "create", "--from-file", fileName) + It("should list the services if they exist", func() { + operators := helper.CmdShouldPass("odo", "catalog", "list", "services") + etcdOperator := regexp.MustCompile(`etcdoperator\.*[a-z][0-9]\.[0-9]\.[0-9]-clusterwide`).FindString(operators) + helper.CmdShouldPass("odo", "service", "create", etcdOperator, "--crd", "EtcdCluster") // now verify if the pods for the operator have started - pods := helper.CmdShouldPass("oc", "get", "pods", "-n", CI_OPERATOR_HUB_PROJECT) + pods := helper.CmdShouldPass("oc", "get", "pods", "-n", project) // Look for pod with example name because that's the name etcd will give to the pods. 
- etcdPod := regexp.MustCompile(`example3-.[a-z0-9]*`).FindString(pods) + etcdPod := regexp.MustCompile(`example-.[a-z0-9]*`).FindString(pods) - ocArgs := []string{"get", "pods", etcdPod, "-o", "template=\"{{.status.phase}}\"", "-n", CI_OPERATOR_HUB_PROJECT} + ocArgs := []string{"get", "pods", etcdPod, "-o", "template=\"{{.status.phase}}\"", "-n", project} helper.WaitForCmdOut("oc", ocArgs, 1, true, func(output string) bool { return strings.Contains(output, "Running") }) - stdOut = helper.CmdShouldPass("odo", "service", "list") - Expect(stdOut).To(ContainSubstring("example")) - Expect(stdOut).To(ContainSubstring("EtcdCluster")) + stdOut := helper.CmdShouldPass("odo", "service", "list") + helper.MatchAllInOutput(stdOut, []string{"example", "EtcdCluster"}) + + // now check for json output + jsonOut := helper.CmdShouldPass("odo", "service", "list", "-o", "json") + helper.MatchAllInOutput(jsonOut, []string{"\"apiVersion\": \"etcd.database.coreos.com/v1beta2\"", "\"kind\": \"EtcdCluster\"", "\"name\": \"example\""}) // Delete the pods created. This should idealy be done by `odo // service delete` but that's not implemented for operator backed // services yet. 
- helper.CmdShouldPass("oc", "delete", "EtcdCluster", "example3") + helper.CmdShouldPass("oc", "delete", "EtcdCluster", "example") // Now let's check the output again to ensure expected behaviour - stdOut = helper.CmdShouldPass("odo", "service", "list") + stdOut = helper.CmdShouldFail("odo", "service", "list") + jsonOut = helper.CmdShouldFail("odo", "service", "list", "-o", "json") Expect(stdOut).To(ContainSubstring("No operator backed services found in the namesapce")) + helper.MatchAllInOutput(jsonOut, []string{"No operator backed services found in the namesapce", "\"message\": \"No operator backed services found in the namesapce\""}) }) }) }) diff --git a/tests/integration/project/cmd_project_test.go b/tests/integration/project/cmd_project_test.go index 8cd650492fb..d438f4400a0 100644 --- a/tests/integration/project/cmd_project_test.go +++ b/tests/integration/project/cmd_project_test.go @@ -1,6 +1,7 @@ package project import ( + "fmt" "os" "path/filepath" "strings" @@ -48,6 +49,41 @@ var _ = Describe("odo project command tests", func() { }) }) + Context("Should be able to delete a project with --wait", func() { + var projectName string + JustBeforeEach(func() { + projectName = helper.RandString(6) + }) + + It("--wait should work with deleting a project", func() { + + // Create the project + helper.CmdShouldPass("odo", "project", "create", projectName) + + // Delete with --wait + output := helper.CmdShouldPass("odo", "project", "delete", projectName, "-f", "--wait") + Expect(output).To(ContainSubstring("Waiting for project to be deleted")) + + }) + + }) + + Context("Delete the project with flag -o json", func() { + var projectName string + JustBeforeEach(func() { + projectName = helper.RandString(6) + }) + + // odo project delete foobar -o json + It("should be able to delete project and show output in json format", func() { + helper.CmdShouldPass("odo", "project", "create", projectName, "-o", "json") + + actual := helper.CmdShouldPass("odo", "project", "delete", 
projectName, "-o", "json") + desired := fmt.Sprintf(`{"kind":"Project","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"%s","namespace":"%s","creationTimestamp":null},"message":"Deleted project : %s"}`, projectName, projectName, projectName) + Expect(desired).Should(MatchJSON(actual)) + }) + }) + Context("when running project command app parameter in directory that doesn't contain .odo config directory", func() { It("should successfully execute list along with machine readable output", func() { diff --git a/tests/integration/servicecatalog/cmd_link_unlink_test.go b/tests/integration/servicecatalog/cmd_link_unlink_test.go index 468338e3b80..813b46d822a 100644 --- a/tests/integration/servicecatalog/cmd_link_unlink_test.go +++ b/tests/integration/servicecatalog/cmd_link_unlink_test.go @@ -210,15 +210,11 @@ var _ = Describe("odo link and unlink command tests", func() { // tests for linking a component to a component stdOut := helper.CmdShouldPass("odo", "link", "component2", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were added")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_HOST")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_PORT")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were added", "COMPONENT_COMPONENT2_HOST", "COMPONENT_COMPONENT2_PORT"}) // tests for unlinking a component from a component stdOut = helper.CmdShouldPass("odo", "unlink", "component2", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were removed")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_HOST")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_PORT")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were removed", "COMPONENT_COMPONENT2_HOST", "COMPONENT_COMPONENT2_PORT"}) // first create a service helper.CmdShouldPass("odo", "service", "create", "-w", 
"dh-postgresql-apb", "--project", project, "--plan", "dev", @@ -231,15 +227,11 @@ var _ = Describe("odo link and unlink command tests", func() { // tests for linking a service to a component stdOut = helper.CmdShouldPass("odo", "link", "dh-postgresql-apb", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were added")) - Expect(stdOut).To(ContainSubstring("DB_PORT")) - Expect(stdOut).To(ContainSubstring("DB_HOST")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were added", "DB_PORT", "DB_HOST"}) // tests for unlinking a service to a component stdOut = helper.CmdShouldPass("odo", "unlink", "dh-postgresql-apb", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were removed")) - Expect(stdOut).To(ContainSubstring("DB_PORT")) - Expect(stdOut).To(ContainSubstring("DB_HOST")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were removed", "DB_PORT", "DB_HOST"}) }) }) }) diff --git a/tests/integration/servicecatalog/cmd_service_test.go b/tests/integration/servicecatalog/cmd_service_test.go index a4106c16330..3894fc17161 100644 --- a/tests/integration/servicecatalog/cmd_service_test.go +++ b/tests/integration/servicecatalog/cmd_service_test.go @@ -257,8 +257,7 @@ var _ = Describe("odo service command tests", func() { // Check json output stdOut = helper.CmdShouldPass("odo", "service", "list", "-o", "json") - Expect(stdOut).To(ContainSubstring("dh-prometheus-apb")) - Expect(stdOut).To(ContainSubstring("ServiceList")) + helper.MatchAllInOutput(stdOut, []string{"dh-prometheus-apb", "ServiceList"}) // cd to a non-component directory and list services helper.Chdir(originalDir) @@ -268,8 +267,7 @@ var _ = Describe("odo service command tests", func() { // Check json output helper.Chdir(originalDir) stdOut = helper.CmdShouldPass("odo", "service", "list", "--app", app, "--project", project, "-o", "json") - 
Expect(stdOut).To(ContainSubstring("dh-prometheus-apb")) - Expect(stdOut).To(ContainSubstring("ServiceList")) + helper.MatchAllInOutput(stdOut, []string{"dh-prometheus-apb", "ServiceList"}) }) @@ -384,15 +382,11 @@ var _ = Describe("odo service command tests", func() { // tests for linking a component to a component stdOut := helper.CmdShouldPass("odo", "link", "component2", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were added")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_HOST")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_PORT")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were added", "COMPONENT_COMPONENT2_HOST", "COMPONENT_COMPONENT2_PORT"}) // tests for unlinking a component from a component stdOut = helper.CmdShouldPass("odo", "unlink", "component2", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were removed")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_HOST")) - Expect(stdOut).To(ContainSubstring("COMPONENT_COMPONENT2_PORT")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were removed", "COMPONENT_COMPONENT2_HOST", "COMPONENT_COMPONENT2_PORT"}) // first create a service helper.CmdShouldPass("odo", "service", "create", "-w", "dh-postgresql-apb", "--project", project, "--plan", "dev", @@ -405,15 +399,11 @@ var _ = Describe("odo service command tests", func() { // tests for linking a service to a component stdOut = helper.CmdShouldPass("odo", "link", "dh-postgresql-apb", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were added")) - Expect(stdOut).To(ContainSubstring("DB_PORT")) - Expect(stdOut).To(ContainSubstring("DB_HOST")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were added", "DB_PORT", "DB_HOST"}) // tests for unlinking a service to a 
component stdOut = helper.CmdShouldPass("odo", "unlink", "dh-postgresql-apb", "--context", context1) - Expect(stdOut).To(ContainSubstring("The below secret environment variables were removed")) - Expect(stdOut).To(ContainSubstring("DB_PORT")) - Expect(stdOut).To(ContainSubstring("DB_HOST")) + helper.MatchAllInOutput(stdOut, []string{"The below secret environment variables were removed", "DB_PORT", "DB_HOST"}) }) }) diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index d11ed239b6d..a39ffcb7be2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -56,19 +56,21 @@ type ImageMetadata struct { // Container contains response of Engine API: // GET "/containers/json" type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig container.HostConfig + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } NetworkSettings *SummaryNetworkSettings Mounts []MountPoint } diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 1d998bf37d9..1b2b7f82e9e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -37,6 +37,7 @@ type OperatorHubStatus struct { // the state of the default hub sources for OperatorHub on the cluster from // enabled to disabled and vice versa. 
// +kubebuilder:subresource:status +// +genclient // +genclient:nonNamespaced type OperatorHub struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/oc/go.mod b/vendor/github.com/openshift/oc/go.mod index ee9e52fbdcb..7b7dee7c9d6 100644 --- a/vendor/github.com/openshift/oc/go.mod +++ b/vendor/github.com/openshift/oc/go.mod @@ -46,7 +46,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.1.0 // indirect - golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 + golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 diff --git a/vendor/github.com/openshift/oc/go.sum b/vendor/github.com/openshift/oc/go.sum index 0d9ce55f65f..cfcccbbc827 100644 --- a/vendor/github.com/openshift/oc/go.sum +++ b/vendor/github.com/openshift/oc/go.sum @@ -834,6 +834,8 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1X golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/Makefile b/vendor/github.com/operator-framework/operator-lifecycle-manager/Makefile index 1db839cf087..3a90111e012 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/Makefile +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/Makefile @@ -119,7 +119,7 @@ setup-bare: clean e2e.namespace e2e: go test -v $(MOD_FLAGS) -failfast -timeout 70m ./test/e2e/... -namespace=openshift-operators -kubeconfig=${KUBECONFIG} -olmNamespace=openshift-operator-lifecycle-manager -dummyImage=bitnami/nginx:latest -e2e-local: build-linux build-wait +e2e-local: build-linux build-wait build-util-linux . ./scripts/build_local.sh . ./scripts/run_e2e_local.sh $(TEST) diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go index 7d713b89e94..6b68decd3f4 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go @@ -33,6 +33,27 @@ func (b *BundleUnpackResult) Bundle() *api.Bundle { return b.bundle } +func (b *BundleUnpackResult) Name() string { + return b.name +} + +// SetCondition replaces the existing BundleLookupCondition of the same type, or adds it if it was not found. 
+func (b *BundleUnpackResult) SetCondition(cond operatorsv1alpha1.BundleLookupCondition) operatorsv1alpha1.BundleLookupCondition { + for i, existing := range b.Conditions { + if existing.Type != cond.Type { + continue + } + if existing.Status == cond.Status && existing.Reason == cond.Reason { + cond.LastTransitionTime = existing.LastTransitionTime + } + b.Conditions[i] = cond + return cond + } + b.Conditions = append(b.Conditions, cond) + + return cond +} + var catalogSourceGVK = operatorsv1alpha1.SchemeGroupVersion.WithKind(operatorsv1alpha1.CatalogSourceKind) func newBundleUnpackResult(lookup *operatorsv1alpha1.BundleLookup) *BundleUnpackResult { @@ -250,16 +271,26 @@ const ( CatalogSourceMissingMessage = "referenced catalogsource not found" JobIncompleteReason = "JobIncomplete" JobIncompleteMessage = "unpack job not completed" + JobNotStartedReason = "JobNotStarted" + JobNotStartedMessage = "unpack job not yet started" + NotUnpackedReason = "BundleNotUnpacked" + NotUnpackedMessage = "bundle contents have not yet been persisted to installplan status" ) func (c *ConfigMapUnpacker) UnpackBundle(lookup *operatorsv1alpha1.BundleLookup) (result *BundleUnpackResult, err error) { result = newBundleUnpackResult(lookup) + + // if pending condition is missing, bundle has already been unpacked cond := result.GetCondition(operatorsv1alpha1.BundleLookupPending) + if cond.Status == corev1.ConditionUnknown { + return result, nil + } + now := c.now() var cs *operatorsv1alpha1.CatalogSource if cs, err = c.csLister.CatalogSources(result.CatalogSourceRef.Namespace).Get(result.CatalogSourceRef.Name); err != nil { - if apierrors.IsNotFound(err) && cond.Status != corev1.ConditionTrue && cond.Reason != CatalogSourceMissingReason { + if apierrors.IsNotFound(err) && cond.Reason != CatalogSourceMissingReason { cond.Status = corev1.ConditionTrue cond.Reason = CatalogSourceMissingReason cond.Message = CatalogSourceMissingMessage @@ -303,7 +334,7 @@ func (c *ConfigMapUnpacker) 
UnpackBundle(lookup *operatorsv1alpha1.BundleLookup) return } - if !jobConditionTrue(job, batchv1.JobComplete) && cond.Status != corev1.ConditionTrue && cond.Reason != JobIncompleteReason { + if !jobConditionTrue(job, batchv1.JobComplete) && (cond.Status != corev1.ConditionTrue || cond.Reason != JobIncompleteReason) { cond.Status = corev1.ConditionTrue cond.Reason = JobIncompleteReason cond.Message = JobIncompleteMessage @@ -318,6 +349,10 @@ func (c *ConfigMapUnpacker) UnpackBundle(lookup *operatorsv1alpha1.BundleLookup) return } + if result.Bundle() == nil || len(result.Bundle().GetObject()) == 0 { + return + } + // A successful load should remove the pending condition result.RemoveCondition(operatorsv1alpha1.BundleLookupPending) diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go index 131a02f8a2f..613381b1c27 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go @@ -71,6 +71,14 @@ func TestConfigMapUnpacker(t *testing.T) { Namespace: "ns-a", Name: "src-a", }, + Conditions: []operatorsv1alpha1.BundleLookupCondition{ + { + Type: operatorsv1alpha1.BundleLookupPending, + Status: corev1.ConditionTrue, + Reason: JobNotStartedReason, + Message: JobNotStartedMessage, + }, + }, }, }, expected: expected{ @@ -116,6 +124,14 @@ func TestConfigMapUnpacker(t *testing.T) { Namespace: "ns-a", Name: "src-a", }, + Conditions: []operatorsv1alpha1.BundleLookupCondition{ + { + Type: operatorsv1alpha1.BundleLookupPending, + Status: corev1.ConditionTrue, + Reason: JobNotStartedReason, + Message: JobNotStartedMessage, + }, + }, }, }, expected: expected{ diff --git 
a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/manifests.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/manifests.go new file mode 100644 index 00000000000..256b2289fec --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/manifests.go @@ -0,0 +1,109 @@ +package catalog + +import ( + "encoding/json" + "fmt" + + "github.com/operator-framework/operator-registry/pkg/configmap" + errorwrap "github.com/pkg/errors" + "github.com/sirupsen/logrus" + v1 "k8s.io/client-go/listers/core/v1" + + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" +) + +// ManifestResolver can dereference a manifest for a step. Steps may embed manifests directly or reference content +// in configmaps +type ManifestResolver interface { + ManifestForStep(step *v1alpha1.Step) (string, error) +} + +// manifestResolver caches manifest from unpacked bundles (via configmaps) +type manifestResolver struct { + configMapLister v1.ConfigMapLister + unpackedSteps map[string][]v1alpha1.StepResource + namespace string + logger logrus.FieldLogger +} + +func newManifestResolver(namespace string, configMapLister v1.ConfigMapLister, logger logrus.FieldLogger) *manifestResolver { + return &manifestResolver{ + namespace: namespace, + configMapLister: configMapLister, + unpackedSteps: map[string][]v1alpha1.StepResource{}, + logger: logger, + } +} + +// ManifestForStep always returns the manifest that should be applied to the cluster for a given step +// the manifest field in the installplan status can contain a reference to a configmap instead +func (r *manifestResolver) ManifestForStep(step *v1alpha1.Step) (string, error) { + manifest := step.Resource.Manifest + ref := refForStep(step, r.logger) + if ref == nil { + 
return manifest, nil + } + + log := r.logger.WithFields(logrus.Fields{"resolving": step.Resolving, "step": step.Resource.Name}) + log.WithField("ref", ref).Debug("step is a reference to configmap") + + usteps, err := r.unpackedStepsForBundle(step.Resolving, ref) + if err != nil { + return "", err + } + + log.Debugf("checking cache for unpacked step") + // need to find the real manifest from the unpacked steps + for _, u := range usteps { + if u.Name == step.Resource.Name && + u.Kind == step.Resource.Kind && + u.Version == step.Resource.Version && + u.Group == step.Resource.Group { + manifest = u.Manifest + log.WithField("manifest", manifest).Debug("step replaced with unpacked value") + break + } + } + if manifest == step.Resource.Manifest { + return "", fmt.Errorf("couldn't find unpacked step for %v", step) + } + return manifest, nil +} + +func (r *manifestResolver) unpackedStepsForBundle(bundleName string, ref *UnpackedBundleReference) ([]v1alpha1.StepResource, error) { + usteps, ok := r.unpackedSteps[bundleName] + if ok { + return usteps, nil + } + cm, err := r.configMapLister.ConfigMaps(ref.Namespace).Get(ref.Name) + if err != nil { + return nil, errorwrap.Wrapf(err, "error finding unpacked bundle configmap for ref %v", *ref) + } + loader := configmap.NewBundleLoader() + bundle, err := loader.Load(cm) + if err != nil { + return nil, errorwrap.Wrapf(err, "error loading unpacked bundle configmap for ref %v", *ref) + } + steps, err := resolver.NewStepResourceFromBundle(bundle, r.namespace, ref.Replaces, ref.CatalogSourceName, ref.CatalogSourceNamespace) + if err != nil { + return nil, errorwrap.Wrapf(err, "error calculating steps for ref %v", *ref) + } + r.unpackedSteps[bundleName] = steps + return steps, nil +} + +func refForStep(step *v1alpha1.Step, log logrus.FieldLogger) *UnpackedBundleReference { + log = log.WithFields(logrus.Fields{"resolving": step.Resolving, "step": step.Resource.Name}) + var ref UnpackedBundleReference + if err := 
json.Unmarshal([]byte(step.Resource.Manifest), &ref); err != nil { + log.Debug("step is not a reference to an unpacked bundle (this is not an error if the step is a manifest)") + return nil + } + log = log.WithField("ref", ref) + if ref.Kind != "ConfigMap" || ref.Name == "" || ref.Namespace == "" || ref.CatalogSourceName == "" || ref.CatalogSourceNamespace == "" { + log.Debug("step is not a reference to an unpacked bundle (this is not an error if the step is a manifest)") + return nil + } + return &ref +} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go index 33097785089..21a28fc0dd4 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go @@ -1098,6 +1098,16 @@ func (o *Operator) createInstallPlan(namespace string, gen int, subs []*v1alpha1 return reference.GetReference(res) } +type UnpackedBundleReference struct { + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` + CatalogSourceName string `json:"catalogSourceName"` + CatalogSourceNamespace string `json:"catalogSourceNamespace"` + Replaces string `json:"replaces"` +} + +// unpackBundles makes one walk through the bundlelookups and attempts to progress them func (o *Operator) unpackBundles(plan *v1alpha1.InstallPlan) (bool, *v1alpha1.InstallPlan, error) { out := plan.DeepCopy() unpacked := true @@ -1110,32 +1120,53 @@ func (o *Operator) unpackBundles(plan *v1alpha1.InstallPlan) (bool, *v1alpha1.In errs = append(errs, err) continue } + out.Status.BundleLookups[i] = *res.BundleLookup - if res == nil { + // if pending condition is present, still waiting for the job to unpack to configmap + if 
res.GetCondition(v1alpha1.BundleLookupPending).Status == corev1.ConditionTrue { unpacked = false continue } - out.Status.BundleLookups[i] = *res.BundleLookup - if res.Bundle() == nil || len(res.Bundle().GetObject()) == 0 { - unpacked = false + // if packed condition is missing, bundle has already been unpacked into steps, continue + if res.GetCondition(resolver.BundleLookupConditionPacked).Status == corev1.ConditionUnknown { continue } + // Ensure that bundle can be applied by the current version of OLM by converting to steps steps, err := resolver.NewStepsFromBundle(res.Bundle(), out.GetNamespace(), res.Replaces, res.CatalogSourceRef.Name, res.CatalogSourceRef.Namespace) if err != nil { - errs = append(errs, fmt.Errorf("failed to turn bundle into steps: %s", err.Error())) + errs = append(errs, fmt.Errorf("failed to turn bundle into steps: %v", err)) unpacked = false continue } - // Add steps and remove resolved bundle lookup + // step manifests are replaced with references to the configmap containing them + for i, s := range steps { + ref := UnpackedBundleReference{ + Kind: "ConfigMap", + Namespace: res.CatalogSourceRef.Namespace, + Name: res.Name(), + CatalogSourceName: res.CatalogSourceRef.Name, + CatalogSourceNamespace: res.CatalogSourceRef.Namespace, + Replaces: res.Replaces, + } + r, err := json.Marshal(&ref) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate reference for configmap: %v", err)) + unpacked = false + continue + } + s.Resource.Manifest = string(r) + steps[i] = s + } + res.RemoveCondition(resolver.BundleLookupConditionPacked) + out.Status.BundleLookups[i] = *res.BundleLookup out.Status.Plan = append(out.Status.Plan, steps...) - out.Status.BundleLookups = append(out.Status.BundleLookups[:i], out.Status.BundleLookups[i+1:]...) 
- i-- } if err := utilerrors.NewAggregate(errs); err != nil { + o.logger.Debugf("failed to unpack bundles: %v", err) return false, nil, err } @@ -1436,6 +1467,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { } ensurer := newStepEnsurer(kubeclient, crclient, dynamicClient) + r := newManifestResolver(plan.GetNamespace(), o.lister.CoreV1().ConfigMapLister(), o.logger) for i, step := range plan.Status.Plan { switch step.Status { @@ -1474,12 +1506,16 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { continue } case v1alpha1.StepStatusUnknown, v1alpha1.StepStatusNotPresent: + manifest, err := r.ManifestForStep(step) + if err != nil { + return err + } o.logger.WithFields(logrus.Fields{"kind": step.Resource.Kind, "name": step.Resource.Name}).Debug("execute resource") switch step.Resource.Kind { case crdKind: // Marshal the manifest into a CRD instance. var crd v1beta1ext.CustomResourceDefinition - err := json.Unmarshal([]byte(step.Resource.Manifest), &crd) + err := json.Unmarshal([]byte(manifest), &crd) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1546,7 +1582,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case v1alpha1.ClusterServiceVersionKind: // Marshal the manifest into a CSV instance. var csv v1alpha1.ClusterServiceVersion - err := json.Unmarshal([]byte(step.Resource.Manifest), &csv) + err := json.Unmarshal([]byte(manifest), &csv) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1579,7 +1615,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case v1alpha1.SubscriptionKind: // Marshal the manifest into a subscription instance. 
var sub v1alpha1.Subscription - err := json.Unmarshal([]byte(step.Resource.Manifest), &sub) + err := json.Unmarshal([]byte(manifest), &sub) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1612,7 +1648,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case clusterRoleKind: // Marshal the manifest into a ClusterRole instance. var cr rbacv1.ClusterRole - err := json.Unmarshal([]byte(step.Resource.Manifest), &cr) + err := json.Unmarshal([]byte(manifest), &cr) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1627,7 +1663,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case clusterRoleBindingKind: // Marshal the manifest into a RoleBinding instance. var rb rbacv1.ClusterRoleBinding - err := json.Unmarshal([]byte(step.Resource.Manifest), &rb) + err := json.Unmarshal([]byte(manifest), &rb) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1642,7 +1678,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case roleKind: // Marshal the manifest into a Role instance. var r rbacv1.Role - err := json.Unmarshal([]byte(step.Resource.Manifest), &r) + err := json.Unmarshal([]byte(manifest), &r) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1665,7 +1701,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case roleBindingKind: // Marshal the manifest into a RoleBinding instance. var rb rbacv1.RoleBinding - err := json.Unmarshal([]byte(step.Resource.Manifest), &rb) + err := json.Unmarshal([]byte(manifest), &rb) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1688,7 +1724,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case serviceAccountKind: // Marshal the manifest into a ServiceAccount instance. 
var sa corev1.ServiceAccount - err := json.Unmarshal([]byte(step.Resource.Manifest), &sa) + err := json.Unmarshal([]byte(manifest), &sa) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1711,7 +1747,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { case serviceKind: // Marshal the manifest into a Service instance var s corev1.Service - err := json.Unmarshal([]byte(step.Resource.Manifest), &s) + err := json.Unmarshal([]byte(manifest), &s) if err != nil { return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) } @@ -1739,7 +1775,7 @@ func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { } // Marshal the manifest into an unstructured object - dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(step.Resource.Manifest), 10) + dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) unstructuredObject := &unstructured.Unstructured{} if err := dec.Decode(unstructuredObject); err != nil { return errorwrap.Wrapf(err, "error decoding %s object to an unstructured object", step.Resource.Name) diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go index 22970a5022c..2ba753dacbf 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go @@ -764,6 +764,8 @@ func (a *Operator) handleClusterServiceVersionDeletion(obj interface{}) { "phase": clusterServiceVersion.Status.Phase, }) + metrics.DeleteCSVMetric(clusterServiceVersion) + defer func(csv v1alpha1.ClusterServiceVersion) { if clusterServiceVersion.IsCopied() { logger.Debug("deleted csv is copied. 
skipping operatorgroup requeue") diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac.go index 17ebf610b7b..19029aa5b24 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac.go @@ -2,19 +2,30 @@ package resolver import ( "fmt" + "hash/fnv" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apiserver/pkg/storage/names" + utilrand "k8s.io/apimachinery/pkg/util/rand" + hashutil "k8s.io/kubernetes/pkg/util/hash" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" ) -var generateName = func(base string) string { - return names.SimpleNameGenerator.GenerateName(base + "-") +const maxNameLength = 63 + +func generateName(base string, o interface{}) string { + hasher := fnv.New32a() + hashutil.DeepHashObject(hasher, o) + hash := utilrand.SafeEncodeString(fmt.Sprint(hasher.Sum32())) + if len(base)+len(hash) > maxNameLength { + base = base[:maxNameLength - len(hash) - 1] + } + + return fmt.Sprintf("%s-%s", base, hash) } type OperatorPermissions struct { @@ -82,7 +93,7 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri // Create Role role := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ - Name: generateName(csv.GetName()), + Name: generateName(csv.GetName(), permission.Rules), Namespace: csv.GetNamespace(), OwnerReferences: []metav1.OwnerReference{ownerutil.NonBlockingOwner(csv)}, Labels: ownerutil.OwnerLabel(csv, v1alpha1.ClusterServiceVersionKind), @@ 
-94,7 +105,7 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri // Create RoleBinding roleBinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: generateName(fmt.Sprintf("%s-%s", role.GetName(), permission.ServiceAccountName)), + Name: generateName(role.GetName(), role), Namespace: csv.GetNamespace(), OwnerReferences: []metav1.OwnerReference{ownerutil.NonBlockingOwner(csv)}, Labels: ownerutil.OwnerLabel(csv, v1alpha1.ClusterServiceVersionKind), @@ -126,7 +137,7 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri // Create ClusterRole role := &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: generateName(csv.GetName()), + Name: generateName(csv.GetName(), permission.Rules), Labels: ownerutil.OwnerLabel(csv, v1alpha1.ClusterServiceVersionKind), }, Rules: permission.Rules, @@ -136,7 +147,7 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri // Create ClusterRoleBinding roleBinding := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: generateName(fmt.Sprintf("%s-%s", role.GetName(), permission.ServiceAccountName)), + Name: generateName(role.GetName(), role), Namespace: csv.GetNamespace(), Labels: ownerutil.OwnerLabel(csv, v1alpha1.ClusterServiceVersionKind), }, diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac_test.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac_test.go new file mode 100644 index 00000000000..060f99d758d --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/rbac_test.go @@ -0,0 +1,65 @@ +package resolver + +import ( + "github.com/stretchr/testify/require" + "math/rand" + "reflect" + "strings" + "testing" + "testing/quick" +) + +func TestGenerateName(t *testing.T) { + type args struct { + base string + o interface{} + } + tests := 
[]struct { + name string + args args + want string + }{ + { + name: "generate", + args: args{ + base: "myname", + o: []string{"something"}, + }, + want: "myname-9c895f74f", + }, + { + name: "truncated", + args: args{ + base: strings.Repeat("name", 100), + o: []string{"something", "else"}, + }, + want: "namenamenamenamenamenamenamenamenamenamenamenamename-78fd8b4d6b", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := generateName(tt.args.base, tt.args.o) + require.Equal(t, tt.want, got) + require.LessOrEqual(t, len(got), maxNameLength) + }) + } +} + +var runeSet = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-") + +type validKubeName string + +func (validKubeName) Generate(rand *rand.Rand, size int) reflect.Value { + b := make([]rune, size) + for i := range b { + b[i] = runeSet[rand.Intn(len(runeSet))] + } + return reflect.ValueOf(validKubeName(b)) +} + +func TestGeneratesWithinRange(t *testing.T) { + f := func(base validKubeName, o string) bool { + return len(generateName(string(base), o)) <= maxNameLength + } + require.NoError(t, quick.Check(f, nil)) +} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver.go index c7d1d31a691..45611242f5a 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver.go @@ -15,9 +15,14 @@ import ( "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" v1alpha1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" + controllerbundle 
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" ) +const ( + BundleLookupConditionPacked v1alpha1.BundleLookupConditionType = "BundleLookupNotPersisted" +) + var timeNow = func() metav1.Time { return metav1.NewTime(time.Now().UTC()) } type Resolver interface { @@ -116,6 +121,20 @@ func (r *OperatorsV1alpha1Resolver) ResolveSteps(namespace string, sourceQuerier Namespace: op.SourceInfo().Catalog.Namespace, Name: op.SourceInfo().Catalog.Name, }, + Conditions: []v1alpha1.BundleLookupCondition{ + { + Type: BundleLookupConditionPacked, + Status: corev1.ConditionTrue, + Reason: controllerbundle.NotUnpackedReason, + Message: controllerbundle.NotUnpackedMessage, + }, + { + Type: v1alpha1.BundleLookupPending, + Status: corev1.ConditionTrue, + Reason: controllerbundle.JobNotStartedReason, + Message: controllerbundle.JobNotStartedMessage, + }, + }, }) } diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver_test.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver_test.go index c6b3559627d..58cb4c352b5 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver_test.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/resolver_test.go @@ -20,6 +20,7 @@ import ( "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" + controllerbundle "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" ) @@ 
-126,6 +127,20 @@ func TestNamespaceResolver(t *testing.T) { Namespace: catalog.Namespace, Name: catalog.Name, }, + Conditions: []v1alpha1.BundleLookupCondition{ + { + Type: BundleLookupConditionPacked, + Status: corev1.ConditionTrue, + Reason: controllerbundle.NotUnpackedReason, + Message: controllerbundle.NotUnpackedMessage, + }, + { + Type: v1alpha1.BundleLookupPending, + Status: corev1.ConditionTrue, + Reason: controllerbundle.JobNotStartedReason, + Message: controllerbundle.JobNotStartedMessage, + }, + }, }, }, subs: []*v1alpha1.Subscription{ @@ -241,6 +256,20 @@ func TestNamespaceResolver(t *testing.T) { Namespace: catalog.Namespace, Name: catalog.Name, }, + Conditions: []v1alpha1.BundleLookupCondition{ + { + Type: BundleLookupConditionPacked, + Status: corev1.ConditionTrue, + Reason: controllerbundle.NotUnpackedReason, + Message: controllerbundle.NotUnpackedMessage, + }, + { + Type: v1alpha1.BundleLookupPending, + Status: corev1.ConditionTrue, + Reason: controllerbundle.JobNotStartedReason, + Message: controllerbundle.JobNotStartedMessage, + }, + }, }, }, subs: []*v1alpha1.Subscription{ @@ -540,10 +569,6 @@ func TestNamespaceResolver(t *testing.T) { } func TestNamespaceResolverRBAC(t *testing.T) { - generateName = func(base string) string { - return "a" - } - namespace := "catsrc-namespace" catalog := CatalogKey{"catsrc", namespace} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/metrics/metrics.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/metrics/metrics.go index 2163a31050c..336199aae73 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/metrics/metrics.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/metrics/metrics.go @@ -188,6 +188,12 @@ func CounterForSubscription(name, installedCSV, channelName, packageName string) return SubscriptionSyncCount.WithLabelValues(name, installedCSV, channelName, packageName) } +func DeleteCSVMetric(oldCSV 
*olmv1alpha1.ClusterServiceVersion) { + // Delete the old CSV metrics + csvAbnormal.DeleteLabelValues(oldCSV.Namespace, oldCSV.Name, oldCSV.Spec.Version.String(), string(oldCSV.Status.Phase), string(oldCSV.Status.Reason)) + csvSucceeded.DeleteLabelValues(oldCSV.Namespace, oldCSV.Name, oldCSV.Spec.Version.String()) +} + func EmitCSVMetric(oldCSV *olmv1alpha1.ClusterServiceVersion, newCSV *olmv1alpha1.ClusterServiceVersion) { if oldCSV == nil || newCSV == nil { return diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/test/e2e/metrics_e2e_test.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/test/e2e/metrics_e2e_test.go index 03155294382..2fe0a88bbbf 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/test/e2e/metrics_e2e_test.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/test/e2e/metrics_e2e_test.go @@ -37,7 +37,6 @@ func TestMetricsEndpoint(t *testing.T) { cleanupCSV, err := createCSV(t, c, crc, failingCSV, testNamespace, false, false) require.NoError(t, err) - defer cleanupCSV() _, err = fetchCSV(t, crc, failingCSV.Name, testNamespace, csvFailedChecker) require.NoError(t, err) @@ -53,9 +52,16 @@ func TestMetricsEndpoint(t *testing.T) { require.Contains(t, rawOutput, "phase=\"Failed\"") require.Contains(t, rawOutput, "reason=\"UnsupportedOperatorGroup\"") require.Contains(t, rawOutput, "version=\"0.0.0\"") - require.Contains(t, rawOutput, "csv_succeeded") - log.Info(rawOutput) + + cleanupCSV() + + rawOutput, err = getMetricsFromPod(t, c, getOLMPodName(t, c), operatorNamespace, "8081") + if err != nil { + t.Fatalf("Failed to retrieve metrics from OLM pod because of: %v\n", err) + } + require.NotContains(t, rawOutput, "csv_abnormal{name=\""+failingCSV.Name+"\"") + require.NotContains(t, rawOutput, "csv_succeeded{name=\""+failingCSV.Name+"\"") } func getOLMPodName(t *testing.T, client operatorclient.ClientInterface) string { diff --git 
a/vendor/github.com/tektoncd/pipeline/.golangci.yml b/vendor/github.com/tektoncd/pipeline/.golangci.yml index 2269e0d2eb6..963ff69ae5a 100644 --- a/vendor/github.com/tektoncd/pipeline/.golangci.yml +++ b/vendor/github.com/tektoncd/pipeline/.golangci.yml @@ -24,6 +24,8 @@ run: issues-exit-code: 1 build-tags: - e2e + skip-files: + - .*/zz_generated.deepcopy.go skip-dirs: - vendor - pkg/client diff --git a/vendor/github.com/tektoncd/pipeline/OWNERS_ALIASES b/vendor/github.com/tektoncd/pipeline/OWNERS_ALIASES new file mode 100644 index 00000000000..5025a4e5673 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/OWNERS_ALIASES @@ -0,0 +1,35 @@ +aliases: + pipeline-approvers: + - afrittoli + - bobcatfish + - dibyom + - dlorenc + - ImJasonH + - sbwsg + - vdemeester + + apis-approvers: + - afrittoli + - bobcatfish + - dibyom + - dlorenc + - ImJasonH + - sbwsg + - vdemeester + + productivity-approvers: + - afrittoli + - bobcatfish + - vdemeester + productivity-reviewers: + - dibyom + - dlorenc + - ImJasonH + - sbwsg + +# Alumni ❤️ +# tejal29 +# pivotal-nader-ziada +# shashwathi +# aaron-prindle +# abayer diff --git a/vendor/github.com/tektoncd/pipeline/config/200-clusterrole.yaml b/vendor/github.com/tektoncd/pipeline/config/200-clusterrole.yaml index d46ab8022c9..30c86613cfd 100644 --- a/vendor/github.com/tektoncd/pipeline/config/200-clusterrole.yaml +++ b/vendor/github.com/tektoncd/pipeline/config/200-clusterrole.yaml @@ -106,3 +106,16 @@ rules: resources: ["podsecuritypolicies"] resourceNames: ["tekton-pipelines"] verbs: ["use"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-leader-election + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + # We uses leases for leaderelection + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] diff --git 
a/vendor/github.com/tektoncd/pipeline/config/200-role.yaml b/vendor/github.com/tektoncd/pipeline/config/200-role.yaml index d91c3c01311..10682ab046c 100644 --- a/vendor/github.com/tektoncd/pipeline/config/200-role.yaml +++ b/vendor/github.com/tektoncd/pipeline/config/200-role.yaml @@ -48,7 +48,7 @@ rules: - apiGroups: [""] resources: ["configmaps"] verbs: ["get"] - resourceNames: ["config-logging", "config-observability"] + resourceNames: ["config-logging", "config-observability", "config-leader-election"] - apiGroups: [""] resources: ["secrets"] verbs: ["list", "watch"] diff --git a/vendor/github.com/tektoncd/pipeline/config/201-clusterrolebinding.yaml b/vendor/github.com/tektoncd/pipeline/config/201-clusterrolebinding.yaml index f0b915c1e18..cc212710602 100644 --- a/vendor/github.com/tektoncd/pipeline/config/201-clusterrolebinding.yaml +++ b/vendor/github.com/tektoncd/pipeline/config/201-clusterrolebinding.yaml @@ -29,6 +29,23 @@ roleRef: name: tekton-pipelines-controller-cluster-access apiGroup: rbac.authorization.k8s.io --- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: tekton-pipelines-controller-leaderelection + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-controller + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-pipelines-leader-election + apiGroup: rbac.authorization.k8s.io +--- # If this ClusterRoleBinding is replaced with a RoleBinding # then the ClusterRole would be namespaced. 
The access described by # the tekton-pipelines-controller-tenant-access ClusterRole would @@ -66,3 +83,21 @@ roleRef: kind: ClusterRole name: tekton-pipelines-webhook-cluster-access apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: tekton-pipelines-webhook-leaderelection + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-webhook + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-pipelines-leader-election + apiGroup: rbac.authorization.k8s.io + diff --git a/vendor/github.com/tektoncd/pipeline/docs/migrating-v1alpha1-to-v1beta1.md b/vendor/github.com/tektoncd/pipeline/docs/migrating-v1alpha1-to-v1beta1.md index 38ee55859c6..bd5cee793f8 100644 --- a/vendor/github.com/tektoncd/pipeline/docs/migrating-v1alpha1-to-v1beta1.md +++ b/vendor/github.com/tektoncd/pipeline/docs/migrating-v1alpha1-to-v1beta1.md @@ -76,7 +76,7 @@ removed entirely, and until this has been resolved, we encourage people to use ` `PipelineResources` when they can. _More on the reasoning and what's left to do in -[Why aren't PipelineResources in Beta?](docs/resources.md#why-arent-pipelineresources-in-beta)._ +[Why aren't PipelineResources in Beta?](resources.md#why-arent-pipelineresources-in-beta)._ To ease migration away from `PipelineResources` [some types have an equivalent `Task` in the Catalog](#pipelineresource-equivalent-catalog-tasks). diff --git a/vendor/github.com/tektoncd/pipeline/docs/resources.md b/vendor/github.com/tektoncd/pipeline/docs/resources.md index 05da3adedbf..aa3e3043822 100644 --- a/vendor/github.com/tektoncd/pipeline/docs/resources.md +++ b/vendor/github.com/tektoncd/pipeline/docs/resources.md @@ -330,7 +330,7 @@ Params that can be added are the following: is used. 
[git checkout][git-checkout] is used to switch to the revision, and will result in a detached HEAD in most cases. Use refspec along with revision if you want to checkout a particular branch without a - detached HEAD. _If no revision is specified, the resource will default to `master`._ + detached HEAD. _If no revision is specified, the resource inspects remote repository to determine the correct default branch._ 1. `refspec`: (Optional) specify a git [refspec][git-refspec] to pass to git-fetch. Note that if this field is specified, it must specify all refs, branches, tags, or commits required to checkout the specified `revision`. An additional fetch diff --git a/vendor/github.com/tektoncd/pipeline/docs/tasks.md b/vendor/github.com/tektoncd/pipeline/docs/tasks.md index 244c71a47e0..042daec6399 100644 --- a/vendor/github.com/tektoncd/pipeline/docs/tasks.md +++ b/vendor/github.com/tektoncd/pipeline/docs/tasks.md @@ -627,7 +627,7 @@ $(workspaces.myworkspace.volume) You can substitute `Volume` names and [types](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes) by parameterizing them. Tekton supports popular `Volume` types such as `ConfigMap`, `Secret`, and `PersistentVolumeClaim`. 
-See this [example](#using-kubernetes-configmap-as-volume-source) to find out how to perform this type of substitution +See this [example](#mounting-a-configmap-as-a-volume-source) to find out how to perform this type of substitution in your `Task.` ## Code examples diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/clustertask-pipelinerun.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/clustertask-pipelinerun.yaml index 9c1818d73e7..c9bf0d776af 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/clustertask-pipelinerun.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/clustertask-pipelinerun.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1alpha1 kind: ClusterTask metadata: - name: cluster-task-pipeline-4 + name: cluster-task-pipeline-4-v1alpha1 spec: steps: - name: task-two-step-one @@ -17,7 +17,7 @@ spec: tasks: - name: cluster-task-pipeline-4 taskRef: - name: cluster-task-pipeline-4 + name: cluster-task-pipeline-4-v1alpha1 kind: ClusterTask --- apiVersion: tekton.dev/v1alpha1 diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml index 02646f3a822..f86a276f584 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml @@ -74,7 +74,7 @@ spec: apiVersion: tekton.dev/v1alpha1 kind: PipelineRun metadata: - name: demo-condtional-pr-without-condition-resource + name: demo-conditional-pr-without-condition-resource spec: pipelineRef: name: pipeline-list-pipeline-repo-files diff --git 
a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun.yaml index a633e29feea..07b42161258 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/pipelineruns/conditional-pipelinerun.yaml @@ -83,7 +83,7 @@ spec: apiVersion: tekton.dev/v1alpha1 kind: PipelineRun metadata: - name: condtional-pr + name: conditional-pr spec: pipelineRef: name: conditional-pipeline diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/clustertask.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/clustertask.yaml index 3e3e1f8f844..876f5a35f6d 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/clustertask.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/clustertask.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1alpha1 kind: ClusterTask metadata: - name: clustertask + name: clustertask-v1alpha1 spec: steps: - image: ubuntu @@ -13,5 +13,5 @@ metadata: generateName: clustertask- spec: taskRef: - name: clustertask + name: clustertask-v1alpha1 kind: ClusterTask diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/optional-resources-with-clustertask.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/optional-resources-with-clustertask.yaml index 428e857e94d..53ede3d6979 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/optional-resources-with-clustertask.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1alpha1/taskruns/optional-resources-with-clustertask.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1alpha1 kind: ClusterTask metadata: - name: clustertask-with-optional-resources + name: clustertask-with-optional-resources-v1alpha1 spec: inputs: resources: @@ -31,5 +31,5 @@ metadata: 
name: clustertask-without-resources spec: taskRef: - name: clustertask-with-optional-resources + name: clustertask-with-optional-resources-v1alpha1 kind: ClusterTask diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml index 2f7913fa884..48b3f855f6a 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun-with-optional-resources.yaml @@ -74,7 +74,7 @@ spec: apiVersion: tekton.dev/v1beta1 kind: PipelineRun metadata: - name: demo-condtional-pr-without-condition-resource + name: demo-conditional-pr-without-condition-resource spec: pipelineRef: name: pipeline-list-pipeline-repo-files diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun.yaml index cb67198440f..03b16e029f8 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/pipelineruns/conditional-pipelinerun.yaml @@ -83,7 +83,7 @@ spec: apiVersion: tekton.dev/v1beta1 kind: PipelineRun metadata: - name: condtional-pr + name: conditional-pr spec: pipelineRef: name: conditional-pipeline diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/clustertask.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/clustertask.yaml index 10972427dab..9124271adc9 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/clustertask.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/clustertask.yaml @@ -1,7 +1,7 @@ apiVersion: 
tekton.dev/v1beta1 kind: ClusterTask metadata: - name: clustertask + name: clustertask-v1beta1 spec: steps: - image: ubuntu @@ -13,5 +13,5 @@ metadata: generateName: clustertask- spec: taskRef: - name: clustertask + name: clustertask-v1beta1 kind: ClusterTask diff --git a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/optional-resources-with-clustertask.yaml b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/optional-resources-with-clustertask.yaml index 1ecae87cf8d..882443c6971 100644 --- a/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/optional-resources-with-clustertask.yaml +++ b/vendor/github.com/tektoncd/pipeline/examples/v1beta1/taskruns/optional-resources-with-clustertask.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1beta1 kind: ClusterTask metadata: - name: clustertask-with-optional-resources + name: clustertask-with-optional-resources-v1beta1 spec: params: - name: filename @@ -30,5 +30,5 @@ metadata: name: clustertask-without-resources spec: taskRef: - name: clustertask-with-optional-resources + name: clustertask-with-optional-resources-v1beta1 kind: ClusterTask diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource.go index 4aa7b3b5b8a..a50d34b9533 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource.go @@ -87,10 +87,7 @@ func NewResource(name, gitImage string, r *resource.PipelineResource) (*Resource gitResource.NOProxy = param.Value } } - // default revision to master if nothing is provided - if gitResource.Revision == "" { - gitResource.Revision = "master" - } + return &gitResource, nil } @@ -149,10 +146,13 @@ func (s *Resource) Replacements() map[string]string { func (s *Resource) GetInputTaskModifier(_ *v1beta1.TaskSpec, path string) (v1beta1.TaskModifier, 
error) { args := []string{ "-url", s.URL, - "-revision", s.Revision, "-path", path, } + if s.Revision != "" { + args = append(args, "-revision", s.Revision) + } + if s.Refspec != "" { args = append(args, "-refspec", s.Refspec) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource_test.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource_test.go index a36ce703ed2..125ae924586 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource_test.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource_test.go @@ -74,7 +74,7 @@ func TestNewGitResource_Valid(t *testing.T) { Name: "test-resource", Type: resourcev1alpha1.PipelineResourceTypeGit, URL: "git@github.com:test/test.git", - Revision: "master", + Revision: "", Refspec: "", GitImage: "override-with-git:latest", Submodules: true, @@ -96,7 +96,7 @@ func TestNewGitResource_Valid(t *testing.T) { Name: "test-resource", Type: resourcev1alpha1.PipelineResourceTypeGit, URL: "git@github.com:test/test.git", - Revision: "master", + Revision: "", Refspec: "refs/changes/22/222134", GitImage: "override-with-git:latest", Submodules: true, @@ -117,7 +117,7 @@ func TestNewGitResource_Valid(t *testing.T) { Name: "test-resource", Type: resourcev1alpha1.PipelineResourceTypeGit, URL: "git@github.com:test/test.git", - Revision: "master", + Revision: "", Refspec: "", GitImage: "override-with-git:latest", Submodules: true, @@ -394,10 +394,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", }, WorkingDir: "/workspace", Env: []corev1.EnvVar{ @@ -431,10 +431,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", 
"-submodules=false", }, WorkingDir: "/workspace", @@ -469,10 +469,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", "-depth", "8", }, @@ -508,10 +508,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", "-submodules=false", "-sslVerify=false", }, @@ -546,10 +546,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", "-submodules=false", "-sslVerify=false", }, @@ -583,10 +583,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", "-submodules=false", "-sslVerify=false", }, @@ -620,10 +620,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", "-submodules=false", "-sslVerify=false", }, @@ -658,10 +658,10 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { Args: []string{ "-url", "git@github.com:test/test.git", - "-revision", - "master", "-path", "/test/test", + "-revision", + "master", "-refspec", "refs/tags/v1.0:refs/tags/v1.0 refs/heads/master:refs/heads/master", "-submodules=false", @@ -675,6 +675,42 @@ func TestGitResource_GetDownloadTaskModifier(t *testing.T) { {Name: "NO_PROXY", Value: "no-proxy.git.com"}, }, }, + }, { + desc: "Without Refspec and without revision", + gitResource: &git.Resource{ + Name: "git-resource", + Type: resourcev1alpha1.PipelineResourceTypeGit, + URL: "git@github.com:test/test.git", + Revision: "", + 
Refspec: "", + GitImage: "override-with-git:latest", + Submodules: false, + Depth: 1, + SSLVerify: true, + HTTPProxy: "http-proxy.git.com", + HTTPSProxy: "https-proxy.git.com", + NOProxy: "no-proxy.git.com", + }, + want: corev1.Container{ + Name: "git-source-git-resource-twkr2", + Image: "override-with-git:latest", + Command: []string{"/ko-app/git-init"}, + Args: []string{ + "-url", + "git@github.com:test/test.git", + "-path", + "/test/test", + "-submodules=false", + }, + WorkingDir: "/workspace", + Env: []corev1.EnvVar{ + {Name: "TEKTON_RESOURCE_NAME", Value: "git-resource"}, + {Name: "HOME", Value: pipeline.HomeDir}, + {Name: "HTTP_PROXY", Value: "http-proxy.git.com"}, + {Name: "HTTPS_PROXY", Value: "https-proxy.git.com"}, + {Name: "NO_PROXY", Value: "no-proxy.git.com"}, + }, + }, }} { t.Run(tc.desc, func(t *testing.T) { ts := v1beta1.TaskSpec{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/git/git.go b/vendor/github.com/tektoncd/pipeline/pkg/git/git.go index 7b5cc047f4d..50fe8fe194f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/git/git.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/git/git.go @@ -64,9 +64,6 @@ func Fetch(logger *zap.SugaredLogger, spec FetchSpec) error { return err } - if spec.Revision == "" { - spec.Revision = "master" - } if spec.Path != "" { if _, err := run(logger, "", "init", spec.Path); err != nil { return err @@ -85,6 +82,12 @@ func Fetch(logger *zap.SugaredLogger, spec FetchSpec) error { logger.Warnf("Failed to set http.sslVerify in git config: %s", err) return err } + if spec.Revision == "" { + spec.Revision = "HEAD" + if _, err := run(logger, "", "symbolic-ref", spec.Revision, "refs/remotes/origin/HEAD"); err != nil { + return err + } + } fetchArgs := []string{"fetch"} if spec.Submodules { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/pod/status.go b/vendor/github.com/tektoncd/pipeline/pkg/pod/status.go index 55046be8bc5..fb35ee310d7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/pod/status.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/pod/status.go @@ -116,9 +116,14 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev for _, s := range pod.Status.ContainerStatuses { if IsContainerStep(s.Name) { if s.State.Terminated != nil && len(s.State.Terminated.Message) != 0 { - if err := updateStatusStartTime(&s); err != nil { + message, time, err := removeStartInfoFromTerminationMessage(s) + if err != nil { logger.Errorf("error setting the start time of step %q in taskrun %q: %w", s.Name, tr.Name, err) } + if time != nil { + s.State.Terminated.StartedAt = *time + s.State.Terminated.Message = message + } } trs.Steps = append(trs.Steps, v1beta1.StepState{ ContainerState: *s.State.DeepCopy(), @@ -151,34 +156,35 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev return *trs } -// updateStatusStartTime searches for a result called "StartedAt" in the JSON-formatted termination message -// of a step and sets the State.Terminated.StartedAt field to this time if it's found. The "StartedAt" result -// is also removed from the list of results in the container status. -func updateStatusStartTime(s *corev1.ContainerStatus) error { +// removeStartInfoFromTerminationMessage searches for a result called "StartedAt" in the JSON-formatted +// termination message of a step and returns the values to use for sets State.Terminated if it's +// found. The "StartedAt" result is also removed from the list of results in the container status. 
+func removeStartInfoFromTerminationMessage(s corev1.ContainerStatus) (string, *metav1.Time, error) { r, err := termination.ParseMessage(s.State.Terminated.Message) if err != nil { - return fmt.Errorf("termination message could not be parsed as JSON: %w", err) + return "", nil, fmt.Errorf("termination message could not be parsed as JSON: %w", err) } for index, result := range r { if result.Key == "StartedAt" { t, err := time.Parse(timeFormat, result.Value) if err != nil { - return fmt.Errorf("could not parse time value %q in StartedAt field: %w", result.Value, err) + return "", nil, fmt.Errorf("could not parse time value %q in StartedAt field: %w", result.Value, err) } - s.State.Terminated.StartedAt = metav1.NewTime(t) + message := "" + startedAt := metav1.NewTime(t) // remove the entry for the starting time r = append(r[:index], r[index+1:]...) if len(r) == 0 { - s.State.Terminated.Message = "" + message = "" } else if bytes, err := json.Marshal(r); err != nil { - return fmt.Errorf("error marshalling remaining results back into termination message: %w", err) + return "", nil, fmt.Errorf("error marshalling remaining results back into termination message: %w", err) } else { - s.State.Terminated.Message = string(bytes) + message = string(bytes) } - break + return message, &startedAt, nil } } - return nil + return "", nil, nil } func updateCompletedTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun.go index d15ef0090ec..8899f2bcdac 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun.go @@ -132,9 +132,8 @@ var ( // resource with the current status of the resource. 
func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) pkgreconciler.Event { logger := logging.FromContext(ctx) - recorder := controller.GetEventRecorder(ctx) - // Snapshot original for the label/annotation check below. - original := pr.DeepCopy() + // Read the initial condition + before := pr.Status.GetCondition(apis.ConditionSucceeded) if !pr.HasStarted() { pr.Status.InitializeConditions() @@ -145,16 +144,9 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) } // start goroutine to track pipelinerun timeout only startTime is not set go c.timeoutHandler.WaitPipelineRun(pr, pr.Status.StartTime) - } else { - pr.Status.InitializeConditions() } - // In case of reconcile errors, we store the error in a multierror, attempt - // to update, and return the original error combined with any update error - var merr *multierror.Error - - switch { - case pr.IsDone(): + if pr.IsDone() { // We may be reading a version of the object that was stored at an older version // and may not have had all of the assumed default specified. 
pr.SetDefaults(contexts.WithUpgradeViaDefaulting(ctx)) @@ -162,16 +154,16 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) c.updatePipelineResults(ctx, pr) if err := artifacts.CleanupArtifactStorage(pr, c.KubeClientSet, logger); err != nil { logger.Errorf("Failed to delete PVC for PipelineRun %s: %v", pr.Name, err) - return err + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) } if err := c.cleanupAffinityAssistants(pr); err != nil { logger.Errorf("Failed to delete StatefulSet for PipelineRun %s: %v", pr.Name, err) - return err + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) } c.timeoutHandler.Release(pr) if err := c.updateTaskRunsStatusDirectly(pr); err != nil { logger.Errorf("Failed to update TaskRun status for PipelineRun %s: %v", pr.Name, err) - return err + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) } go func(metrics *Recorder) { err := metrics.DurationAndCount(pr) @@ -179,46 +171,54 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) logger.Warnf("Failed to log the metrics : %v", err) } }(c.metrics) - case pr.IsCancelled(): + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, nil) + } + + if pr.IsCancelled() { // If the pipelinerun is cancelled, cancel tasks and update status - before := pr.Status.GetCondition(apis.ConditionSucceeded) - merr = multierror.Append(merr, cancelPipelineRun(logger, pr, c.PipelineClientSet)) - after := pr.Status.GetCondition(apis.ConditionSucceeded) - events.Emit(recorder, before, after, pr) - default: - if err := c.tracker.Track(pr.GetTaskRunRef(), pr); err != nil { - logger.Errorf("Failed to create tracker for TaskRuns for PipelineRun %s: %v", pr.Name, err) - recorder.Event(pr, corev1.EventTypeWarning, v1beta1.PipelineRunReasonFailed.String(), "Failed to create tracker for TaskRuns for PipelineRun") - return err - } + err := cancelPipelineRun(logger, pr, c.PipelineClientSet) + return 
c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) + } - // Make sure that the PipelineRun status is in sync with the actual TaskRuns - err := c.updatePipelineRunStatusFromInformer(ctx, pr) - if err != nil { - // This should not fail. Return the error so we can re-try later. - logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error()) - return err - } + if err := c.tracker.Track(pr.GetTaskRunRef(), pr); err != nil { + logger.Errorf("Failed to create tracker for TaskRuns for PipelineRun %s: %v", pr.Name, err) + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) + } - // Reconcile this copy of the pipelinerun and then write back any status or label - // updates regardless of whether the reconciliation errored out. - if err = c.reconcile(ctx, pr); err != nil { - logger.Errorf("Reconcile error: %v", err.Error()) - merr = multierror.Append(merr, err) - } + // Make sure that the PipelineRun status is in sync with the actual TaskRuns + err := c.updatePipelineRunStatusFromInformer(ctx, pr) + if err != nil { + // This should not fail. Return the error so we can re-try later. + logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error()) + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) } - // If we need to update the labels or annotations, we need to call Update with these - // changes explicitly. - if !reflect.DeepEqual(original.ObjectMeta.Labels, pr.ObjectMeta.Labels) || !reflect.DeepEqual(original.ObjectMeta.Annotations, pr.ObjectMeta.Annotations) { - if _, err := c.updateLabelsAndAnnotations(pr); err != nil { - logger.Warn("Failed to update PipelineRun labels/annotations", zap.Error(err)) - recorder.Event(pr, corev1.EventTypeWarning, "Error", "PipelineRun failed to update labels/annotations") - return multierror.Append(merr, err) - } + // Reconcile this copy of the pipelinerun and then write back any status or label + // updates regardless of whether the reconciliation errored out. 
+ if err = c.reconcile(ctx, pr); err != nil { + logger.Errorf("Reconcile error: %v", err.Error()) } - return merr.ErrorOrNil() + return c.finishReconcileUpdateEmitEvents(ctx, pr, before, err) +} + +func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, pr *v1beta1.PipelineRun, beforeCondition *apis.Condition, previousError error) error { + recorder := controller.GetEventRecorder(ctx) + logger := logging.FromContext(ctx) + + afterCondition := pr.Status.GetCondition(apis.ConditionSucceeded) + events.Emit(recorder, beforeCondition, afterCondition, pr) + _, err := c.updateLabelsAndAnnotations(pr) + if err != nil { + logger.Warn("Failed to update PipelineRun labels/annotations", zap.Error(err)) + events.EmitError(recorder, err, pr) + } + + merr := multierror.Append(previousError, err).ErrorOrNil() + if controller.IsPermanentError(previousError) { + return controller.NewPermanentError(merr) + } + return merr } func (c *Reconciler) updatePipelineResults(ctx context.Context, pr *v1beta1.PipelineRun) { @@ -240,6 +240,7 @@ func (c *Reconciler) updatePipelineResults(ctx context.Context, pr *v1beta1.Pipe resolvedResultRefs := resources.ResolvePipelineResultRefs(pr.Status, pipelineSpec.Results) pr.Status.PipelineResults = getPipelineRunResults(pipelineSpec, resolvedResultRefs) } + func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) error { logger := logging.FromContext(ctx) recorder := controller.GetEventRecorder(ctx) @@ -258,7 +259,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonCouldntGetPipeline, "Error retrieving pipeline for pipelinerun %s/%s: %s", pr.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } // Store the fetched PipelineSpec on the PipelineRun for auditing @@ -289,7 +290,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonInvalidGraph, "PipelineRun %s/%s's Pipeline 
DAG is invalid: %s", pr.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } if err := pipelineSpec.Validate(ctx); err != nil { @@ -297,7 +298,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonFailedValidation, "Pipeline %s/%s can't be Run; it has an invalid spec: %s", pipelineMeta.Namespace, pipelineMeta.Name, err) - return nil + return controller.NewPermanentError(err) } if err := resources.ValidateResourceBindings(pipelineSpec, pr); err != nil { @@ -305,7 +306,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonInvalidBindings, "PipelineRun %s/%s doesn't bind Pipeline %s/%s's PipelineResources correctly: %s", pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err) - return nil + return controller.NewPermanentError(err) } providedResources, err := resources.GetResourcesFromBindings(pr, c.resourceLister.PipelineResources(pr.Namespace).Get) if err != nil { @@ -313,7 +314,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonCouldntGetResource, "PipelineRun %s/%s can't be Run; it tries to bind Resources that don't exist: %s", pipelineMeta.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } // Ensure that the PipelineRun provides all the parameters required by the Pipeline if err := resources.ValidateRequiredParametersProvided(&pipelineSpec.Params, &pr.Spec.Params); err != nil { @@ -321,7 +322,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonParameterMissing, "PipelineRun %s parameters is missing some parameters required by Pipeline %s's parameters: %s", pr.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } // Ensure that the parameters from the PipelineRun are overriding Pipeline parameters with the same type. 
@@ -332,7 +333,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonParameterTypeMismatch, "PipelineRun %s/%s parameters have mismatching types with Pipeline %s/%s's parameters: %s", pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err) - return nil + return controller.NewPermanentError(err) } // Ensure that the workspaces expected by the Pipeline are provided by the PipelineRun. @@ -340,7 +341,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonInvalidWorkspaceBinding, "PipelineRun %s/%s doesn't bind Pipeline %s/%s's Workspaces correctly: %s", pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err) - return nil + return controller.NewPermanentError(err) } // Ensure that the ServiceAccountNames defined correct. @@ -348,7 +349,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonInvalidServiceAccountMapping, "PipelineRun %s/%s doesn't define ServiceAccountNames correctly: %s", pr.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } // Apply parameter substitution from the PipelineRun @@ -387,7 +388,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err "PipelineRun %s/%s can't be Run; couldn't resolve all references: %s", pipelineMeta.Namespace, pr.Name, err) } - return nil + return controller.NewPermanentError(err) } if pipelineState.IsDone() && pr.IsDone() { @@ -401,7 +402,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err if err != nil { logger.Errorf("Failed to validate pipelinerun %q with error %v", pr.Name, err) pr.Status.MarkFailed(ReasonFailedValidation, err.Error()) - return nil + return controller.NewPermanentError(err) } } @@ -413,7 +414,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err 
pr.Status.MarkFailed(volumeclaim.ReasonCouldntCreateWorkspacePVC, "Failed to create PVC for PipelineRun %s/%s Workspaces correctly: %s", pr.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } } @@ -424,7 +425,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err pr.Status.MarkFailed(ReasonCouldntCreateAffinityAssistantStatefulSet, "Failed to create StatefulSet for PipelineRun %s/%s correctly: %s", pr.Namespace, pr.Name, err) - return nil + return controller.NewPermanentError(err) } } } @@ -432,7 +433,7 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err as, err := artifacts.InitializeArtifactStorage(c.Images, pr, pipelineSpec, c.KubeClientSet, logger) if err != nil { logger.Infof("PipelineRun failed to initialize artifact storage %s", pr.Name) - return err + return controller.NewPermanentError(err) } // When the pipeline run is stopping, we don't schedule any new task and only @@ -444,7 +445,6 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err } } - before := pr.Status.GetCondition(apis.ConditionSucceeded) after := resources.GetPipelineConditionStatus(pr, pipelineState, logger, d) switch after.Status { case corev1.ConditionTrue: @@ -456,8 +456,6 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun) err } // Read the condition the way it was set by the Mark* helpers after = pr.Status.GetCondition(apis.ConditionSucceeded) - events.Emit(recorder, before, after, pr) - pr.Status.TaskRuns = getTaskRunsStatus(pr, pipelineState) logger.Infof("PipelineRun %s status is being set to %s", pr.Name, after) return nil @@ -473,7 +471,7 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.Pip candidateTasks, err := dag.GetSchedulable(d, pipelineState.SuccessfulPipelineTaskNames()...) 
if err != nil { logger.Errorf("Error getting potential next tasks for valid pipelinerun %s: %v", pr.Name, err) - return nil + return controller.NewPermanentError(err) } nextRprts := pipelineState.GetNextTasks(candidateTasks) @@ -481,7 +479,7 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.Pip if err != nil { logger.Infof("Failed to resolve all task params for %q with error %v", pr.Name, err) pr.Status.MarkFailed(ReasonFailedValidation, err.Error()) - return nil + return controller.NewPermanentError(err) } resources.ApplyTaskResults(nextRprts, resolvedResultRefs) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun_test.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun_test.go index 41a6cf03d86..64906567f99 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -387,6 +387,8 @@ func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { } } +// TestReconcile_InvalidPipelineRuns runs "Reconcile" on several PipelineRuns that are invalid in different ways. +// It verifies that reconcile fails, how it fails and which events are triggered. 
func TestReconcile_InvalidPipelineRuns(t *testing.T) { ts := []*v1beta1.Task{ tb.Task("a-task-that-exists", tb.TaskNamespace("foo")), @@ -444,6 +446,9 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { tb.PipelineParamSpec("some-param", v1beta1.ParamTypeString), tb.PipelineTask("some-task", "a-task-that-needs-params")), )), + tb.PipelineRun("pipeline-invalid-dag-graph", tb.PipelineRunNamespace("foo"), tb.PipelineRunSpec("", tb.PipelineRunPipelineSpec( + tb.PipelineTask("dag-task-1", "dag-task-1", tb.RunAfter("dag-task-1")), + ))), } d := test.Data{ Tasks: ts, @@ -455,56 +460,74 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { pipelineRun *v1beta1.PipelineRun reason string hasNoDefaultLabels bool + permanentError bool }{ { name: "invalid-pipeline-shd-be-stop-reconciling", pipelineRun: prs[0], reason: ReasonCouldntGetPipeline, hasNoDefaultLabels: true, + permanentError: true, + }, { + name: "invalid-pipeline-run-missing-tasks-shd-stop-reconciling", + pipelineRun: prs[1], + reason: ReasonCouldntGetTask, + permanentError: true, }, { - name: "invalid-pipeline-run-missing-tasks-shd-stop-reconciling", - pipelineRun: prs[1], - reason: ReasonCouldntGetTask, + name: "invalid-pipeline-run-params-dont-exist-shd-stop-reconciling", + pipelineRun: prs[2], + reason: ReasonFailedValidation, + permanentError: true, }, { - name: "invalid-pipeline-run-params-dont-exist-shd-stop-reconciling", - pipelineRun: prs[2], - reason: ReasonFailedValidation, + name: "invalid-pipeline-run-resources-not-bound-shd-stop-reconciling", + pipelineRun: prs[3], + reason: ReasonInvalidBindings, + permanentError: true, }, { - name: "invalid-pipeline-run-resources-not-bound-shd-stop-reconciling", - pipelineRun: prs[3], - reason: ReasonInvalidBindings, + name: "invalid-pipeline-run-missing-resource-shd-stop-reconciling", + pipelineRun: prs[4], + reason: ReasonCouldntGetResource, + permanentError: true, }, { - name: "invalid-pipeline-run-missing-resource-shd-stop-reconciling", - pipelineRun: 
prs[4], - reason: ReasonCouldntGetResource, + name: "invalid-pipeline-missing-declared-resource-shd-stop-reconciling", + pipelineRun: prs[5], + reason: ReasonFailedValidation, + permanentError: true, }, { - name: "invalid-pipeline-missing-declared-resource-shd-stop-reconciling", - pipelineRun: prs[5], - reason: ReasonFailedValidation, + name: "invalid-pipeline-mismatching-parameter-types", + pipelineRun: prs[6], + reason: ReasonParameterTypeMismatch, + permanentError: true, }, { - name: "invalid-pipeline-mismatching-parameter-types", - pipelineRun: prs[6], - reason: ReasonParameterTypeMismatch, + name: "invalid-pipeline-missing-conditions-shd-stop-reconciling", + pipelineRun: prs[7], + reason: ReasonCouldntGetCondition, + permanentError: true, }, { - name: "invalid-pipeline-missing-conditions-shd-stop-reconciling", - pipelineRun: prs[7], - reason: ReasonCouldntGetCondition, + name: "invalid-embedded-pipeline-resources-bot-bound-shd-stop-reconciling", + pipelineRun: prs[8], + reason: ReasonInvalidBindings, + permanentError: true, }, { - name: "invalid-embedded-pipeline-resources-bot-bound-shd-stop-reconciling", - pipelineRun: prs[8], - reason: ReasonInvalidBindings, + name: "invalid-embedded-pipeline-bad-name-shd-stop-reconciling", + pipelineRun: prs[9], + reason: ReasonFailedValidation, + permanentError: true, }, { - name: "invalid-embedded-pipeline-bad-name-shd-stop-reconciling", - pipelineRun: prs[9], - reason: ReasonFailedValidation, + name: "invalid-embedded-pipeline-mismatching-parameter-types", + pipelineRun: prs[10], + reason: ReasonParameterTypeMismatch, + permanentError: true, }, { - name: "invalid-embedded-pipeline-mismatching-parameter-types", - pipelineRun: prs[10], - reason: ReasonParameterTypeMismatch, + name: "invalid-pipeline-run-missing-params-shd-stop-reconciling", + pipelineRun: prs[11], + reason: ReasonParameterMissing, + permanentError: true, }, { - name: "invalid-pipeline-run-missing-params-shd-stop-reconciling", - pipelineRun: prs[11], - 
reason: ReasonParameterMissing, + name: "invalid-pipeline-with-invalid-dag-graph", + pipelineRun: prs[12], + reason: ReasonInvalidGraph, + permanentError: true, }, } @@ -514,12 +537,15 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { defer cancel() c := testAssets.Controller - if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.pipelineRun)); err != nil { - t.Fatalf("Error reconciling: %s", err) + // When a PipelineRun is invalid and can't run, we expect a permanent error that will + // tell the Reconciler to not keep trying to reconcile. + reconcileError := c.Reconciler.Reconcile(context.Background(), getRunName(tc.pipelineRun)) + if reconcileError == nil { + t.Fatalf("Expected an error to be returned by Reconcile, got nil instead") + } + if controller.IsPermanentError(reconcileError) != tc.permanentError { + t.Fatalf("Expected the error to be permanent: %v but got permanent: %v", tc.permanentError, controller.IsPermanentError(reconcileError)) } - // When a PipelineRun is invalid and can't run, we don't want to return an error because - // an error will tell the Reconciler to keep trying to reconcile; instead we want to stop - // and forget about the Run. 
reconciledRun, err := testAssets.Clients.Pipeline.TektonV1beta1().PipelineRuns(tc.pipelineRun.Namespace).Get(tc.pipelineRun.Name, metav1.GetOptions{}) if err != nil { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go index 4a2b143f50b..14a0dad982f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go @@ -234,7 +234,7 @@ type GetTaskRun func(name string) (*v1beta1.TaskRun, error) func GetResourcesFromBindings(pr *v1beta1.PipelineRun, getResource resources.GetResource) (map[string]*resourcev1alpha1.PipelineResource, error) { rs := map[string]*resourcev1alpha1.PipelineResource{} for _, resource := range pr.Spec.Resources { - r, err := resources.GetResourceFromBinding(&resource, getResource) + r, err := resources.GetResourceFromBinding(resource, getResource) if err != nil { return rs, fmt.Errorf("error following resource reference for %s: %w", resource.Name, err) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index 686b9dd2437..f93c3b668e7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -1450,6 +1451,29 @@ func TestGetPipelineConditionStatus(t *testing.T) { } } +// pipeline should result in timeout if its runtime exceeds its spec.Timeout based on its status.Timeout +func 
TestGetPipelineConditionStatus_PipelineTimeouts(t *testing.T) { + d, err := DagFromState(oneFinishedState) + if err != nil { + t.Fatalf("Unexpected error while buildig DAG for state %v: %v", oneFinishedState, err) + } + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "pipelinerun-no-tasks-started"}, + Spec: v1beta1.PipelineRunSpec{ + Timeout: &metav1.Duration{Duration: 1 * time.Minute}, + }, + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + StartTime: &metav1.Time{Time: time.Now().Add(-2 * time.Minute)}, + }, + }, + } + c := GetPipelineConditionStatus(pr, oneFinishedState, zap.NewNop().Sugar(), d) + if c.Status != corev1.ConditionFalse && c.Reason != v1beta1.PipelineRunReasonTimedOut.String() { + t.Fatalf("Expected to get status %s but got %s for state %v", corev1.ConditionFalse, c.Status, oneFinishedState) + } +} + func TestGetResourcesFromBindings(t *testing.T) { pr := tb.PipelineRun("pipelinerun", tb.PipelineRunSpec("pipeline", tb.PipelineRunResourceBinding("git-resource", tb.PipelineResourceBindingRef("sweet-resource")), diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/metrics.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/metrics.go index 3ce9a0e97f4..3dcc78570bf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/metrics.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/metrics.go @@ -204,7 +204,7 @@ func (r *Recorder) DurationAndCount(tr *v1beta1.TaskRun) error { return err } - stats.Record(ctx, prTRDuration.M(float64(duration/time.Second))) + metrics.Record(ctx, prTRDuration.M(float64(duration/time.Second))) metrics.Record(ctx, trCount.M(1)) return nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/image_exporter.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/image_exporter.go index f9b19a9568f..8634378cb36 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/image_exporter.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/image_exporter.go @@ -45,7 +45,7 @@ func AddOutputImageDigestExporter( return fmt.Errorf("failed to get bound resource: %w while adding output image digest exporter", err) } - resource, err := GetResourceFromBinding(&boundResource.PipelineResourceBinding, gr) + resource, err := GetResourceFromBinding(boundResource.PipelineResourceBinding, gr) if err != nil { return fmt.Errorf("failed to get output pipeline Resource for taskRun %q resource %v; error: %w while adding output image digest exporter", tr.Name, boundResource, err) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/input_resource_test.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/input_resource_test.go index 81ce89b9fed..051e525b534 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/input_resource_test.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/input_resource_test.go @@ -369,7 +369,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "master", "-path", "/workspace/gitspace"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/gitspace"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git"}, @@ -410,7 +410,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-with-branch-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "branch", "-path", "/workspace/gitspace"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", 
"/workspace/gitspace", "-revision", "branch"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git-with-branch"}, @@ -458,7 +458,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-with-branch-mz4c7", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "branch", "-path", "/workspace/gitspace"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/gitspace", "-revision", "branch"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git-with-branch"}, @@ -468,7 +468,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-with-branch-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "branch", "-path", "/workspace/git-duplicate-space"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/git-duplicate-space", "-revision", "branch"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git-with-branch"}, @@ -509,7 +509,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "master", "-path", "/workspace/gitspace"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/gitspace"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git"}, @@ -550,7 +550,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-with-branch-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", 
"branch", "-path", "/workspace/gitspace"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/gitspace", "-revision", "branch"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git-with-branch"}, @@ -639,7 +639,7 @@ func TestAddInputResourceToTask(t *testing.T) { Name: "git-source-the-git-with-sslVerify-false-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "branch", "-path", "/workspace/gitspace", "-sslVerify=false"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/gitspace", "-revision", "branch", "-sslVerify=false"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git-with-sslVerify-false"}, @@ -964,7 +964,7 @@ gsutil cp gs://fake-bucket/rules.zip /workspace/gcs-dir Name: "git-source-the-git-with-branch-9l9zj", Image: "override-with-git:latest", Command: []string{"/ko-app/git-init"}, - Args: []string{"-url", "https://github.com/grafeas/kritis", "-revision", "branch", "-path", "/workspace/gitspace"}, + Args: []string{"-url", "https://github.com/grafeas/kritis", "-path", "/workspace/gitspace", "-revision", "branch"}, WorkingDir: "/workspace", Env: []corev1.EnvVar{ {Name: "TEKTON_RESOURCE_NAME", Value: "the-git-with-branch"}, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution.go index e01efe83cb5..83d2a0646a9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution.go @@ -55,7 +55,7 @@ func ResolveTaskResources(ts *v1beta1.TaskSpec, taskName string, kind v1beta1.Ta } for _, r := range inputs { - rr, err := 
GetResourceFromBinding(&r.PipelineResourceBinding, gr) + rr, err := GetResourceFromBinding(r.PipelineResourceBinding, gr) if err != nil { return nil, fmt.Errorf("couldn't retrieve referenced input PipelineResource: %w", err) } @@ -64,7 +64,7 @@ func ResolveTaskResources(ts *v1beta1.TaskSpec, taskName string, kind v1beta1.Ta } for _, r := range outputs { - rr, err := GetResourceFromBinding(&r.PipelineResourceBinding, gr) + rr, err := GetResourceFromBinding(r.PipelineResourceBinding, gr) if err != nil { return nil, fmt.Errorf("couldn't retrieve referenced output PipelineResource: %w", err) @@ -77,7 +77,7 @@ func ResolveTaskResources(ts *v1beta1.TaskSpec, taskName string, kind v1beta1.Ta // GetResourceFromBinding will return an instance of a PipelineResource to use for r, either by getting it with getter or by // instantiating it from the embedded spec. -func GetResourceFromBinding(r *v1beta1.PipelineResourceBinding, getter GetResource) (*resourcev1alpha1.PipelineResource, error) { +func GetResourceFromBinding(r v1beta1.PipelineResourceBinding, getter GetResource) (*resourcev1alpha1.PipelineResource, error) { if (r.ResourceRef != nil && r.ResourceRef.Name != "") && r.ResourceSpec != nil { return nil, errors.New("Both ResourseRef and ResourceSpec are defined. 
Expected only one") } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution_test.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution_test.go index da0974088a5..cc72c424b96 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution_test.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution_test.go @@ -273,7 +273,7 @@ func TestGetResourceFromBinding_Ref(t *testing.T) { Name: "git-repo", }, } - binding := &v1beta1.PipelineResourceBinding{ + binding := v1beta1.PipelineResourceBinding{ ResourceRef: &v1beta1.PipelineResourceRef{ Name: "foo-resource", }, @@ -292,7 +292,7 @@ func TestGetResourceFromBinding_Ref(t *testing.T) { } func TestGetResourceFromBinding_Spec(t *testing.T) { - binding := &v1beta1.PipelineResourceBinding{ + binding := v1beta1.PipelineResourceBinding{ ResourceSpec: &resourcev1alpha1.PipelineResourceSpec{ Type: resourcev1alpha1.PipelineResourceTypeGit, Params: []resourcev1alpha1.ResourceParam{{ @@ -318,7 +318,7 @@ func TestGetResourceFromBinding_Spec(t *testing.T) { } func TestGetResourceFromBinding_NoNameOrSpec(t *testing.T) { - binding := &v1beta1.PipelineResourceBinding{} + binding := v1beta1.PipelineResourceBinding{} gr := func(n string) (*resourcev1alpha1.PipelineResource, error) { return nil, nil } @@ -330,7 +330,7 @@ func TestGetResourceFromBinding_NoNameOrSpec(t *testing.T) { } func TestGetResourceFromBinding_NameAndSpec(t *testing.T) { - binding := &v1beta1.PipelineResourceBinding{ + binding := v1beta1.PipelineResourceBinding{ ResourceSpec: &resourcev1alpha1.PipelineResourceSpec{ Type: resourcev1alpha1.PipelineResourceTypeGit, Params: []resourcev1alpha1.ResourceParam{{ @@ -353,7 +353,7 @@ func TestGetResourceFromBinding_NameAndSpec(t *testing.T) { } func TestGetResourceFromBinding_ErrorGettingResource(t *testing.T) { - binding := 
&v1beta1.PipelineResourceBinding{ + binding := v1beta1.PipelineResourceBinding{ ResourceRef: &v1beta1.PipelineResourceRef{ Name: "foo-resource", }, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun.go index 0602cd4f957..24bea0d0ed4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun.go @@ -88,6 +88,8 @@ var _ taskrunreconciler.Interface = (*Reconciler)(nil) func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkgreconciler.Event { logger := logging.FromContext(ctx) recorder := controller.GetEventRecorder(ctx) + // Read the initial condition + before := tr.Status.GetCondition(apis.ConditionSucceeded) // If the TaskRun is just starting, this will also set the starttime, // from which the timeout will immediately begin counting down. @@ -157,7 +159,6 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // If the TaskRun is cancelled, kill resources and update status if tr.IsCancelled() { - before := tr.Status.GetCondition(apis.ConditionSucceeded) message := fmt.Sprintf("TaskRun %q was cancelled", tr.Name) err := c.failTaskRun(ctx, tr, v1beta1.TaskRunReasonCancelled, message) return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err) @@ -166,7 +167,6 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // Check if the TaskRun has timed out; if it is, this will set its status // accordingly. 
if tr.HasTimedOut() { - before := tr.Status.GetCondition(apis.ConditionSucceeded) message := fmt.Sprintf("TaskRun %q failed to finish within %q", tr.Name, tr.GetTimeout()) err := c.failTaskRun(ctx, tr, v1beta1.TaskRunReasonTimedOut, message) return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err) @@ -184,7 +184,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg } // Store the condition before reconcile - before := tr.Status.GetCondition(apis.ConditionSucceeded) + before = tr.Status.GetCondition(apis.ConditionSucceeded) // Reconcile this copy of the task run and then write back any status // updates regardless of whether the reconciliation errored out. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun_test.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun_test.go index 224c250c6e1..108c614f006 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun_test.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/taskrun_test.go @@ -940,7 +940,7 @@ func TestReconcile(t *testing.T) { tb.Command(entrypointLocation), tb.Args("-wait_file", "/tekton/tools/0", "-post_file", "/tekton/tools/1", "-termination_path", "/tekton/termination", "-entrypoint", "/ko-app/git-init", "--", "-url", "https://foo.git", - "-revision", "master", "-path", "/workspace/workspace"), + "-path", "/workspace/workspace"), tb.WorkingDir(workspaceDir), tb.EnvVar("HOME", "/tekton/home"), tb.EnvVar("TEKTON_RESOURCE_NAME", "workspace"), @@ -1023,8 +1023,6 @@ func TestReconcile(t *testing.T) { "--", "-url", "https://foo.git", - "-revision", - "master", "-path", "/workspace/workspace", ), @@ -1132,10 +1130,10 @@ func TestReconcile(t *testing.T) { "--", "-url", "github.com/foo/bar.git", - "-revision", - "rel-can", "-path", - "/workspace/workspace"), + "/workspace/workspace", + "-revision", + "rel-can"), tb.WorkingDir(workspaceDir), // Note: the duplication of HOME env var here is 
intentional: our pod builder // adds it first and the git pipelineresource adds its own to ensure that HOME diff --git a/vendor/github.com/tektoncd/pipeline/tekton/README.md b/vendor/github.com/tektoncd/pipeline/tekton/README.md index 5cc7d31c834..dae132973ae 100644 --- a/vendor/github.com/tektoncd/pipeline/tekton/README.md +++ b/vendor/github.com/tektoncd/pipeline/tekton/README.md @@ -30,6 +30,9 @@ To make a new release: ([see one of way of doing that here](https://github.com/tektoncd/plumbing/tree/master/tekton/resources/release#create-draft-release)). 1. Add an entry to [the README](../README.md) at `HEAD` for docs and examples for the new release ([README.md#read-the-docs](../README.md#read-the-docs)). +1. Update [the catalog repo](https://github.com/tektoncd/catalog) test infrastructure to use the + new release by updating the `RELEASE_YAML` link in + [e2e-tests.sh](https://github.com/tektoncd/catalog/blob/v1beta1/test/e2e-tests.sh). ### Run the Pipeline diff --git a/vendor/github.com/tektoncd/pipeline/tekton/publish.yaml b/vendor/github.com/tektoncd/pipeline/tekton/publish.yaml index 8b73689a029..c4161e6e2ba 100644 --- a/vendor/github.com/tektoncd/pipeline/tekton/publish.yaml +++ b/vendor/github.com/tektoncd/pipeline/tekton/publish.yaml @@ -111,13 +111,16 @@ spec: value: "off" - name: GOFLAGS value: "-mod=vendor" - - name: CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE - value: /secret/release.json + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/secret/release.json" script: | #!/usr/bin/env bash set -ex - # Auth with CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE + # Activate service account + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} + + # Setup docker-auth gcloud auth configure-docker # ko requires this variable to be set in order to set image creation timestamps correctly https://github.com/google/go-containerregistry/pull/146 @@ -134,7 +137,7 @@ spec: # include it. 
As of 9/20/2019, this amounts to about 11MB of additional # data in each image. TMPDIR=$(mktemp -d) - tar cvfz ${TMPDIR}/source.tar.gz vendor/ + tar cfz ${TMPDIR}/source.tar.gz vendor/ for d in cmd/*; do ln -s ${TMPDIR}/source.tar.gz ${d}/kodata/ done diff --git a/vendor/github.com/tektoncd/pipeline/test/README.md b/vendor/github.com/tektoncd/pipeline/test/README.md index 5b135f929e8..0940680d7e3 100644 --- a/vendor/github.com/tektoncd/pipeline/test/README.md +++ b/vendor/github.com/tektoncd/pipeline/test/README.md @@ -206,9 +206,12 @@ go test -v -tags=e2e -count=1 ./test -run ^TestTaskRun To run the YAML e2e tests, run the following command: ```bash -./test/e2e-tests-yaml.sh +go test -v -count=1 -tags=examples -timeout=20m ./test/ ``` +To limit parallelism of tests, use `-parallel=n` where `n` is the number of +tests to run in parallel. + ### Running upgrade tests There are two scenarios in upgrade tests. One is to install the previous release, upgrade to the current release, and diff --git a/vendor/github.com/tektoncd/pipeline/test/clients.go b/vendor/github.com/tektoncd/pipeline/test/clients.go index 56822302a80..d05d17ad4e1 100644 --- a/vendor/github.com/tektoncd/pipeline/test/clients.go +++ b/vendor/github.com/tektoncd/pipeline/test/clients.go @@ -54,6 +54,7 @@ type clients struct { KubeClient *knativetest.KubeClient PipelineClient v1beta1.PipelineInterface + ClusterTaskClient v1beta1.ClusterTaskInterface TaskClient v1beta1.TaskInterface TaskRunClient v1beta1.TaskRunInterface PipelineRunClient v1beta1.PipelineRunInterface @@ -88,6 +89,7 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) } c.PipelineClient = cs.TektonV1beta1().Pipelines(namespace) + c.ClusterTaskClient = cs.TektonV1beta1().ClusterTasks() c.TaskClient = cs.TektonV1beta1().Tasks(namespace) c.TaskRunClient = cs.TektonV1beta1().TaskRuns(namespace) c.PipelineRunClient = 
cs.TektonV1beta1().PipelineRuns(namespace) diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh index e0a6328bc33..1fba0249576 100755 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh @@ -18,115 +18,6 @@ source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh -function teardown() { - subheader "Tearing down Tekton Pipelines" - ko delete --ignore-not-found=true -f config/ - # teardown will be called when run against an existing cluster to cleanup before - # continuing, so we must wait for the cleanup to complete or the subsequent attempt - # to deploy to the same namespace will fail - wait_until_object_does_not_exist namespace tekton-pipelines -} - -function output_yaml_test_results() { - # If formatting fails for any reason, use yaml as a fall back. - kubectl get $1.tekton.dev -o=custom-columns-file=${REPO_ROOT_DIR}/test/columns.txt || \ - kubectl get $1.tekton.dev -oyaml -} - -function output_pods_logs() { - echo ">>> $1" - kubectl get $1.tekton.dev -o yaml - local runs=$(kubectl get $1.tekton.dev --output=jsonpath="{.items[*].metadata.name}") - set +e - for run in ${runs}; do - echo ">>>> $1 ${run}" - case "$1" in - "taskrun") - tkn taskrun logs --nocolour ${run} - ;; - "pipelinerun") - tkn pipelinerun logs --nocolour ${run} - ;; - esac - done - set -e - echo ">>>> Pods" - kubectl get pods -o yaml -} - -# Called by `fail_test` (provided by `e2e-tests.sh`) to dump info on test failure -function dump_extra_cluster_state() { - echo ">>> Pipeline controller log:" - kubectl -n tekton-pipelines logs $(get_app_pod tekton-pipelines-controller tekton-pipelines) - echo ">>> Pipeline webhook log:" - kubectl -n tekton-pipelines logs $(get_app_pod tekton-pipelines-webhook tekton-pipelines) -} - -function validate_run() { - local tests_finished=0 - for i in {1..90}; do - local 
finished="$(kubectl get $1.tekton.dev --output=jsonpath='{.items[*].status.conditions[*].status}')" - if [[ ! "$finished" == *"Unknown"* ]]; then - tests_finished=1 - break - fi - sleep 10 - done - - return ${tests_finished} -} - -function check_results() { - local failed=0 - results="$(kubectl get $1.tekton.dev --output=jsonpath='{range .items[*]}{.metadata.name}={.status.conditions[*].type}{.status.conditions[*].status}{" "}{end}')" - for result in ${results}; do - if [[ ! "${result,,}" == *"=succeededtrue" ]]; then - echo "ERROR: test ${result} but should be succeededtrue" - failed=1 - fi - done - - return ${failed} -} - -function create_resources() { - local resource=$1 - echo ">> Creating resources ${resource}" - - # Applying the resources, either *taskruns or * *pipelineruns except those - # in the no-ci directory - for file in $(find ${REPO_ROOT_DIR}/examples/${resource}s/ -name '*.yaml' -not -path '*/no-ci/*' | sort); do - perl -p -e 's/gcr.io\/christiewilson-catfactory/$ENV{KO_DOCKER_REPO}/g' ${file} | ko create -f - || return 1 - done -} - -function run_tests() { - local resource=$1 - - # Wait for tests to finish. - echo ">> Waiting for tests to finish for ${resource}" - if validate_run $resource; then - echo "ERROR: tests timed out" - fi - - # Check that tests passed. - echo ">> Checking test results for ${resource}" - if check_results $resource; then - echo ">> All YAML tests passed" - return 0 - fi - return 1 -} - -function run_yaml_tests() { - echo ">> Starting tests for the resource ${1}/${2}" - create_resources ${1}/${2} || fail_test "Could not create ${2}/${1} from the examples" - if ! 
run_tests ${2}; then - return 1 - fi - return 0 -} - function install_pipeline_crd() { echo ">> Deploying Tekton Pipelines" ko resolve -f config/ \ diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests-upgrade.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-upgrade.sh index cce0c60bd89..f635a860131 100755 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests-upgrade.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-upgrade.sh @@ -51,15 +51,7 @@ failed=0 go_test_e2e -timeout=20m ./test || failed=1 # Run the post-integration tests. -for test in taskrun pipelinerun; do - header "Running YAML e2e tests for ${test}s" - if ! run_yaml_tests ${test}; then - echo "ERROR: one or more YAML tests failed" - output_yaml_test_results ${test} - output_pods_logs ${test} - failed=1 - fi -done +go_test_e2e -tags=examples -timeout=20m ./test/ || failed=1 # Remove all the pipeline CRDs, and clean up the environment for next Scenario. uninstall_pipeline_crd @@ -87,15 +79,7 @@ go_test_e2e -timeout=20m ./test || failed=1 # Run the post-integration tests. We do not need to install the resources again, since # they are installed before the upgrade. We verify if they still work, after going through # the upgrade. -for test in taskrun pipelinerun; do - header "Running YAML e2e tests for ${test}s" - if ! 
run_tests ${test}; then - echo "ERROR: one or more YAML tests failed" - output_yaml_test_results ${test} - output_pods_logs ${test} - failed=1 - fi -done +go_test_e2e -tags=examples -timeout=20m ./test/ || failed=1 (( failed )) && fail_test diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests-yaml.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-yaml.sh deleted file mode 100755 index 6d2061dca9f..00000000000 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests-yaml.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2019 The Tekton Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script calls out to scripts in tektoncd/plumbing to setup a cluster -# and deploy Tekton Pipelines to it for running integration tests. - -source $(git rev-parse --show-toplevel)/test/e2e-common.sh - -# Script entry point. - -initialize $@ - -header "Setting up environment" - -# Handle failures ourselves, so we can dump useful info. -set +o errexit -set +o pipefail - -install_pipeline_crd - -# Run the tests -failed=0 -for version in v1alpha1 v1beta1; do - for test in taskrun pipelinerun; do - header "Running YAML e2e tests for ${version} ${test}s" - if ! 
run_yaml_tests ${version} ${test}; then - echo "ERROR: one or more YAML tests failed" - output_yaml_test_results ${test} - output_pods_logs ${test} - failed=1 - fi - done - # Clean resources - delete_pipeline_resources - for res in services pods configmaps secrets serviceaccounts persistentvolumeclaims; do - kubectl delete --ignore-not-found=true ${res} --all - done -done - -(( failed )) && fail_test - -success diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh index c13f01308ae..9fd46940045 100755 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh @@ -36,7 +36,7 @@ go_test_e2e -timeout=20m ./test/... || failed=1 # Run these _after_ the integration tests b/c they don't quite work all the way # and they cause a lot of noise in the logs, making it harder to debug integration # test failures. -${REPO_ROOT_DIR}/test/e2e-tests-yaml.sh --run-tests || failed=1 +go_test_e2e -tags=examples -timeout=20m ./test/ || failed=1 (( failed )) && fail_test success diff --git a/vendor/github.com/tektoncd/pipeline/test/examples_test.go b/vendor/github.com/tektoncd/pipeline/test/examples_test.go new file mode 100644 index 00000000000..24d89ef339e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/test/examples_test.go @@ -0,0 +1,221 @@ +// +build examples + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "errors" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +var ( + pipelineRunTimeout = 10 * time.Minute +) + +const ( + DEFAULT_KO_DOCKER_REPO = `gcr.io\/christiewilson-catfactory` + DEFAULT_NAMESPACE = `namespace: default` +) + +// GetCreatedTektonCrd parses output of an external ko invocation provided as +// input, as is the kind of Tekton CRD to search for (ie. taskrun) +func GetCreatedTektonCrd(input []byte, kind string) (string, error) { + re := regexp.MustCompile(kind + `.tekton.dev\/(.+) created`) + submatch := re.FindSubmatch(input) + if submatch == nil || len(submatch) < 2 { + return "", nil + } + return string(submatch[1]), nil +} + +func waitValidatePipelineRunDone(t *testing.T, c *clients, pipelineRunName string) { + err := WaitForPipelineRunState(c, pipelineRunName, pipelineRunTimeout, Succeed(pipelineRunName), pipelineRunName) + + if err != nil { + t.Fatalf("Failed waiting for pipeline run done: %v", err) + } + return +} + +func waitValidateTaskRunDone(t *testing.T, c *clients, taskRunName string) { + // Per test basis + err := WaitForTaskRunState(c, taskRunName, Succeed(taskRunName), taskRunName) + + if err != nil { + t.Fatalf("Failed waiting for task run done: %v", err) + } + return +} + +// SubstituteEnv substitutes docker repos and bucket paths from the system +// environment for input to allow tests on local clusters. 
It also updates the +// namespace for ServiceAccounts so that they work under test +func SubstituteEnv(input []byte, namespace string) ([]byte, error) { + val, ok := os.LookupEnv("KO_DOCKER_REPO") + var output []byte + if ok { + re := regexp.MustCompile(DEFAULT_KO_DOCKER_REPO) + output = re.ReplaceAll(input, []byte(val)) + } else { + return nil, errors.New("KO_DOCKER_REPO is not set") + } + + re := regexp.MustCompile(DEFAULT_NAMESPACE) + output = re.ReplaceAll(output, []byte(strings.ReplaceAll(DEFAULT_NAMESPACE, "default", namespace))) + return output, nil +} + +// KoCreate wraps the ko binary and invokes `ko create` for input within +// namespace +func KoCreate(input []byte, namespace string) ([]byte, error) { + cmd := exec.Command("ko", "create", "-n", namespace, "-f", "-") + cmd.Stdin = strings.NewReader(string(input)) + + out, err := cmd.CombinedOutput() + return out, err +} + +// DeleteClusterTask removes a single clustertask by name using provided +// clientset. Test state is used for logging. 
DeleteClusterTask does not wait +// for the clustertask to be deleted, so it is still possible to have name +// conflicts during test +func DeleteClusterTask(t *testing.T, c *clients, name string) { + t.Logf("Deleting clustertask %s", name) + err := c.ClusterTaskClient.Delete(name, &metav1.DeleteOptions{}) + if err != nil { + t.Fatalf("Failed to delete clustertask: %v", err) + } +} + +type waitFunc func(t *testing.T, c *clients, name string) + +func exampleTest(path string, waitValidateFunc waitFunc, kind string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + // Setup unique namespaces for each test so they can run in complete + // isolation + c, namespace := setup(t) + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + inputExample, err := ioutil.ReadFile(path) + + if err != nil { + t.Fatalf("Error reading file: %v", err) + } + + subbedInput, err := SubstituteEnv(inputExample, namespace) + if err != nil { + t.Skipf("Couldn't substitute environment: %v", err) + } + + out, err := KoCreate(subbedInput, namespace) + if err != nil { + t.Fatalf("%s Output: %s", err, out) + } + + // Parse from KoCreate for now + name, err := GetCreatedTektonCrd(out, kind) + if name == "" { + // Nothing to check from ko create, this is not a taskrun or pipeline + // run. Some examples in the directory do not directly output a TaskRun + // or PipelineRun (ie. task-result.yaml). 
+ t.Skipf("pipelinerun or taskrun not created for %s", path) + } else if err != nil { + t.Fatalf("Failed to get created Tekton CRD of kind %s: %v", kind, err) + } + + // NOTE: If an example creates more than one clustertask, they will not all + // be cleaned up + clustertask, err := GetCreatedTektonCrd(out, "clustertask") + if clustertask != "" { + knativetest.CleanupOnInterrupt(func() { DeleteClusterTask(t, c, clustertask) }, t.Logf) + defer DeleteClusterTask(t, c, clustertask) + } else if err != nil { + t.Fatalf("Failed to get created clustertask: %v", err) + } + + waitValidateFunc(t, c, name) + } +} + +func getExamplePaths(t *testing.T, dir string) []string { + var examplePaths []string + + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Fatalf("couldn't walk path %s: %v", path, err) + } + // Do not append root and any other folders named "examples" + if info.Name() == "examples" && info.IsDir() { + return nil + } + if info.Name() == "no-ci" && info.IsDir() { + return filepath.SkipDir + } + if info.IsDir() == false && filepath.Ext(info.Name()) == ".yaml" { + examplePaths = append(examplePaths, path) + return nil + } + return nil + }) + if err != nil { + t.Fatalf("couldn't walk example directory %s: %v", dir, err) + } + + return examplePaths +} + +func extractTestName(baseDir string, path string) string { + re := regexp.MustCompile(baseDir + "/(.+).yaml") + submatch := re.FindSubmatch([]byte(path)) + if submatch == nil { + return path + } + return string(submatch[1]) +} + +func TestExamples(t *testing.T) { + baseDir := "../examples" + + t.Parallel() + for _, path := range getExamplePaths(t, baseDir) { + testName := extractTestName(baseDir, path) + waitValidateFunc := waitValidatePipelineRunDone + kind := "pipelinerun" + + if strings.Contains(path, "/taskruns/") { + waitValidateFunc = waitValidateTaskRunDone + kind = "taskrun" + } + + t.Run(testName, exampleTest(path, waitValidateFunc, kind)) + } +} diff --git 
a/vendor/github.com/tektoncd/pipeline/test/git_checkout_test.go b/vendor/github.com/tektoncd/pipeline/test/git_checkout_test.go index f16fefe112b..814b6f4995d 100644 --- a/vendor/github.com/tektoncd/pipeline/test/git_checkout_test.go +++ b/vendor/github.com/tektoncd/pipeline/test/git_checkout_test.go @@ -299,6 +299,63 @@ func TestGitPipelineRunFail_HTTPS_PROXY(t *testing.T) { } } +// TestGitPipelineRunWithNonMasterBranch is an integration test that will verify the source code is either fetched or pulled +// successfully under different revision inputs (default branch, branch) +// This test will run on spring-petclinic repository which does not contain a master branch as the default branch +func TestGitPipelineRunWithNonMasterBranch(t *testing.T) { + t.Parallel() + + revisions := []string{"", "main"} + + for _, revision := range revisions { + + t.Run(revision, func(t *testing.T) { + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGitPipelineResourceSpringPetClinic(revision, "", "true", "", "", "")); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) + } + + t.Logf("Creating Task %s", gitTestTaskName) + if _, err := c.TaskClient.Create(getGitCheckTask(namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", gitTestTaskName, err) + } + + t.Logf("Creating Pipeline %s", gitTestPipelineName) + if _, err := c.PipelineClient.Create(getGitCheckPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) + if _, err := c.PipelineRunClient.Create(getGitCheckPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineRunName, err) + } + + if err := 
WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", gitTestPipelineRunName, err) + t.Fatalf("PipelineRun execution failed") + } + }) + } +} + +// getGitPipelineResourceSpringPetClinic will help to clone the spring-petclinic repository which does not contains master branch +func getGitPipelineResourceSpringPetClinic(revision, refspec, sslverify, httpproxy, httpsproxy, noproxy string) *v1alpha1.PipelineResource { + return tb.PipelineResource(gitSourceResourceName, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/spring-projects/spring-petclinic"), + tb.PipelineResourceSpecParam("Revision", revision), + tb.PipelineResourceSpecParam("Refspec", refspec), + tb.PipelineResourceSpecParam("sslVerify", sslverify), + tb.PipelineResourceSpecParam("httpProxy", httpproxy), + tb.PipelineResourceSpecParam("httpsProxy", httpsproxy), + tb.PipelineResourceSpecParam("noProxy", noproxy), + )) +} + func getGitPipelineResource(revision, refspec, sslverify, httpproxy, httpsproxy, noproxy string) *v1alpha1.PipelineResource { return tb.PipelineResource(gitSourceResourceName, tb.PipelineResourceSpec( v1alpha1.PipelineResourceTypeGit, diff --git a/vendor/github.com/tektoncd/pipeline/test/init_test.go b/vendor/github.com/tektoncd/pipeline/test/init_test.go index 9b5da5516da..7b279b81e9e 100644 --- a/vendor/github.com/tektoncd/pipeline/test/init_test.go +++ b/vendor/github.com/tektoncd/pipeline/test/init_test.go @@ -1,4 +1,4 @@ -// +build e2e +// +build e2e examples /* Copyright 2019 The Tekton Authors diff --git a/vendor/github.com/tektoncd/pipeline/test/v1alpha1/git_checkout_test.go b/vendor/github.com/tektoncd/pipeline/test/v1alpha1/git_checkout_test.go index efa40c9eee1..b28f81471d9 100644 --- a/vendor/github.com/tektoncd/pipeline/test/v1alpha1/git_checkout_test.go +++ 
b/vendor/github.com/tektoncd/pipeline/test/v1alpha1/git_checkout_test.go @@ -297,6 +297,63 @@ func TestGitPipelineRunFail_HTTPS_PROXY(t *testing.T) { } } +// TestGitPipelineRunWithNonMasterBranch is an integration test that will verify the source code is either fetched or pulled +// successfully under different revision inputs (default branch, branch) +// This test will run on spring-petclinic repository which does not contain a master branch as the default branch +func TestGitPipelineRunWithNonMasterBranch(t *testing.T) { + t.Parallel() + + revisions := []string{"", "main"} + + for _, revision := range revisions { + + t.Run(revision, func(t *testing.T) { + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGitPipelineResourceSpringPetClinic(revision, "", "true", "", "", "")); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) + } + + t.Logf("Creating Task %s", gitTestTaskName) + if _, err := c.TaskClient.Create(getGitCheckTask()); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", gitTestTaskName, err) + } + + t.Logf("Creating Pipeline %s", gitTestPipelineName) + if _, err := c.PipelineClient.Create(getGitCheckPipeline()); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) + if _, err := c.PipelineRunClient.Create(getGitCheckPipelineRun()); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineRunName, err) + } + + if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", gitTestPipelineRunName, err) + t.Fatalf("PipelineRun 
execution failed") + } + }) + } +} + +// getGitPipelineResourceSpringPetClinic will help to clone the spring-petclinic repository which does not contains master branch +func getGitPipelineResourceSpringPetClinic(revision, refspec, sslverify, httpproxy, httpsproxy, noproxy string) *v1alpha1.PipelineResource { + return tb.PipelineResource(gitSourceResourceName, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/spring-projects/spring-petclinic"), + tb.PipelineResourceSpecParam("Revision", revision), + tb.PipelineResourceSpecParam("Refspec", refspec), + tb.PipelineResourceSpecParam("sslVerify", sslverify), + tb.PipelineResourceSpecParam("httpProxy", httpproxy), + tb.PipelineResourceSpecParam("httpsProxy", httpsproxy), + tb.PipelineResourceSpecParam("noProxy", noproxy), + )) +} + func getGitPipelineResource(revision, refspec, sslverify, httpproxy, httpsproxy, noproxy string) *v1alpha1.PipelineResource { return tb.PipelineResource(gitSourceResourceName, tb.PipelineResourceSpec( v1alpha1.PipelineResourceTypeGit, diff --git a/vendor/github.com/tektoncd/triggers/cmd/binding-eval/cmd/root.go b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/cmd/root.go new file mode 100644 index 00000000000..0cf17e29c92 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/cmd/root.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "sort" + + "github.com/spf13/cobra" + "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" + "github.com/tektoncd/triggers/pkg/template" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + rootCmd = &cobra.Command{ + Use: "binding-eval", + Short: "Tekton TriggerBinding evaluator", + Run: rootRun, + } + + bindingPath string + httpPath string +) + +func init() { + rootCmd.Flags().StringVarP(&bindingPath, "binding", "b", "", "Path to trigger binding") + rootCmd.Flags().StringVarP(&httpPath, "http_request", "r", "", "Path to HTTP request") + if err := rootCmd.MarkFlagRequired("binding"); err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func rootRun(cmd *cobra.Command, args []string) { + if err := evalBinding(os.Stdout, bindingPath, httpPath); err != nil { + log.Fatal(err) + } +} + +func evalBinding(w io.Writer, bindingPath, httpPath string) error { + // Read HTTP request. + r, err := readHTTP(httpPath) + if err != nil { + return fmt.Errorf("error reading HTTP file: %w", err) + } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return fmt.Errorf("error reading HTTP body: %w", err) + } + + // Read bindings. + bindings, err := readBindings(bindingPath) + if err != nil { + return fmt.Errorf("error reading bindings: %w", err) + } + + t := template.ResolvedTrigger{ + TriggerBindings: bindings, + } + params, err := template.ResolveParams(t, body, r.Header) + if err != nil { + return fmt.Errorf("error resolving params: %w", err) + } + + // Sort results for stable output. 
+ sort.SliceStable(params, func(i, j int) bool { + return params[i].Name < params[j].Name + }) + + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + if err := enc.Encode(params); err != nil { + return fmt.Errorf("error encoding params: %w", err) + } + + return nil +} + +func readBindings(path string) ([]*v1alpha1.TriggerBinding, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("error reading binding file: %w", err) + } + defer f.Close() + + var list []*v1alpha1.TriggerBinding + decoder := streaming.NewDecoder(f, scheme.Codecs.UniversalDecoder()) + b := new(v1alpha1.TriggerBinding) + for err == nil { + _, _, err = decoder.Decode(nil, b) + if err != nil { + if err != io.EOF { + return nil, fmt.Errorf("error decoding bindings: %w", err) + } + break + } + list = append(list, b) + } + if err != nil && err != io.EOF { + return nil, fmt.Errorf("error decoding bindings: %w", err) + } + + return list, nil +} + +func readHTTP(path string) (*http.Request, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("error opening file: %w", err) + } + defer f.Close() + + return http.ReadRequest(bufio.NewReader(f)) +} + +// Execute runs the command. +func Execute() error { + return rootCmd.Execute() +} diff --git a/vendor/github.com/tektoncd/triggers/cmd/binding-eval/cmd/root_test.go b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/cmd/root_test.go new file mode 100644 index 00000000000..ad8a1f0e07c --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/cmd/root_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestEvalBinding(t *testing.T) { + out := new(bytes.Buffer) + if err := evalBinding(out, "../testdata/triggerbinding.yaml", "../testdata/http.txt"); err != nil { + t.Fatalf("evalBinding: %v", err) + } + + want := `[ + { + "name": "bar", + "value": "tacocat" + }, + { + "name": "foo", + "value": "body" + } +] +` + if diff := cmp.Diff(want, out.String()); diff != "" { + t.Errorf("-want +got: %s", diff) + } +} diff --git a/vendor/github.com/tektoncd/triggers/cmd/binding-eval/main.go b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/main.go new file mode 100644 index 00000000000..fb6e7d01645 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/main.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "os" + + "github.com/tektoncd/triggers/cmd/binding-eval/cmd" +) + +func main() { + if err := cmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/tektoncd/triggers/cmd/binding-eval/testdata/http.txt b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/testdata/http.txt new file mode 100644 index 00000000000..d6a66b5f6f7 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/testdata/http.txt @@ -0,0 +1,6 @@ +POST /foo HTTP/1.1 +Content-Length: 16 +Content-Type: application/json +X-Header: tacocat + +{"test": "body"} diff --git a/vendor/github.com/tektoncd/triggers/cmd/binding-eval/testdata/triggerbinding.yaml b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/testdata/triggerbinding.yaml new file mode 100644 index 00000000000..1e8873b473b --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/cmd/binding-eval/testdata/triggerbinding.yaml @@ -0,0 +1,10 @@ +apiVersion: tekton.dev/v1alpha1 +kind: TriggerBinding +metadata: + name: pipeline-binding +spec: + params: + - name: foo + value: $(body.test) + - name: bar + value: $(header.X-Header) diff --git a/vendor/github.com/tektoncd/triggers/config/500-webhooks.yaml b/vendor/github.com/tektoncd/triggers/config/500-webhooks.yaml index 39af298ba46..aa7a2307971 100644 --- a/vendor/github.com/tektoncd/triggers/config/500-webhooks.yaml +++ b/vendor/github.com/tektoncd/triggers/config/500-webhooks.yaml @@ -21,7 +21,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-triggers - triggers.tekton.dev/release: devel + triggers.tekton.dev/release: "devel" # The data is populated at install time. 
--- @@ -33,7 +33,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-triggers - triggers.tekton.dev/release: devel + triggers.tekton.dev/release: "devel" webhooks: - admissionReviewVersions: - v1beta1 @@ -54,7 +54,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-triggers - triggers.tekton.dev/release: devel + triggers.tekton.dev/release: "devel" webhooks: - admissionReviewVersions: - v1beta1 @@ -74,7 +74,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-triggers - triggers.tekton.dev/release: devel + triggers.tekton.dev/release: "devel" webhooks: - admissionReviewVersions: - v1beta1 diff --git a/vendor/github.com/tektoncd/triggers/config/controller-service.yaml b/vendor/github.com/tektoncd/triggers/config/controller-service.yaml index 237a0d0bbf3..782bf8996fb 100644 --- a/vendor/github.com/tektoncd/triggers/config/controller-service.yaml +++ b/vendor/github.com/tektoncd/triggers/config/controller-service.yaml @@ -19,7 +19,7 @@ metadata: app.kubernetes.io/name: controller app.kubernetes.io/component: controller app.kubernetes.io/instance: default - app.kubernetes.io/version: devel + app.kubernetes.io/version: "devel" app.kubernetes.io/part-of: tekton-triggers triggers.tekton.dev/release: "devel" app: tekton-triggers-controller diff --git a/vendor/github.com/tektoncd/triggers/config/controller.yaml b/vendor/github.com/tektoncd/triggers/config/controller.yaml index 3f35568c7ea..40b76e11179 100644 --- a/vendor/github.com/tektoncd/triggers/config/controller.yaml +++ b/vendor/github.com/tektoncd/triggers/config/controller.yaml @@ -21,7 +21,7 @@ metadata: app.kubernetes.io/name: controller app.kubernetes.io/component: controller app.kubernetes.io/instance: default - app.kubernetes.io/version: devel + app.kubernetes.io/version: "devel" app.kubernetes.io/part-of: 
tekton-triggers # tekton.dev/release value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml triggers.tekton.dev/release: "devel" @@ -41,7 +41,7 @@ spec: app.kubernetes.io/name: controller app.kubernetes.io/component: controller app.kubernetes.io/instance: default - app.kubernetes.io/version: devel + app.kubernetes.io/version: "devel" app.kubernetes.io/part-of: tekton-triggers app: tekton-triggers-controller triggers.tekton.dev/release: "devel" diff --git a/vendor/github.com/tektoncd/triggers/config/webhook-service.yaml b/vendor/github.com/tektoncd/triggers/config/webhook-service.yaml index 51162968f6a..e93c15412ea 100644 --- a/vendor/github.com/tektoncd/triggers/config/webhook-service.yaml +++ b/vendor/github.com/tektoncd/triggers/config/webhook-service.yaml @@ -21,7 +21,7 @@ metadata: app.kubernetes.io/name: webhook app.kubernetes.io/component: webhook app.kubernetes.io/instance: default - app.kubernetes.io/version: devel + app.kubernetes.io/version: "devel" app.kubernetes.io/part-of: tekton-triggers app: tekton-triggers-webhook version: "devel" diff --git a/vendor/github.com/tektoncd/triggers/config/webhook.yaml b/vendor/github.com/tektoncd/triggers/config/webhook.yaml index 17f29ac1f35..10e53c7e964 100644 --- a/vendor/github.com/tektoncd/triggers/config/webhook.yaml +++ b/vendor/github.com/tektoncd/triggers/config/webhook.yaml @@ -21,7 +21,7 @@ metadata: app.kubernetes.io/name: webhook app.kubernetes.io/component: webhook app.kubernetes.io/instance: default - app.kubernetes.io/version: devel + app.kubernetes.io/version: "devel" app.kubernetes.io/part-of: tekton-triggers # tekton.dev/release value replaced with inputs.params.versionTag in triggers/tekton/publish.yaml triggers.tekton.dev/release: "devel" @@ -41,7 +41,7 @@ spec: app.kubernetes.io/name: webhook app.kubernetes.io/component: webhook app.kubernetes.io/instance: default - app.kubernetes.io/version: devel + app.kubernetes.io/version: "devel" app.kubernetes.io/part-of: 
tekton-triggers app: tekton-triggers-webhook triggers.tekton.dev/release: "devel" diff --git a/vendor/github.com/tektoncd/triggers/docs/eventlisteners.md b/vendor/github.com/tektoncd/triggers/docs/eventlisteners.md index be9efcdf9b8..59c8952a7ba 100644 --- a/vendor/github.com/tektoncd/triggers/docs/eventlisteners.md +++ b/vendor/github.com/tektoncd/triggers/docs/eventlisteners.md @@ -16,6 +16,7 @@ using [Event Interceptors](#Interceptors). - [Syntax](#syntax) - [ServiceAccountName](#serviceAccountName) + - [PodTemplate](#podTemplate) - [Triggers](#triggers) - [Interceptors](#interceptors) - [Logging](#logging) @@ -44,6 +45,8 @@ the following fields: - Optional: - [`serviceType`](#serviceType) - Specifies what type of service the sink pod is exposed as + - [`podTemplate`](#podTemplate) - Specifies the PodTemplate + for your EventListener pod [kubernetes-overview]: https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields @@ -147,6 +150,23 @@ documentations for details. For external services to connect to your cluster (e.g. GitHub sending webhooks), check out the guide on [exposing EventListeners](./exposing-eventlisteners.md). +## PodTemplate + +The `podTemplate` field is optional. A PodTemplate is specifications for +creating EventListener pod. A PodTemplate consists of: +- `tolerations` - list of toleration which allows pods to schedule onto the nodes with matching taints. +This is needed only if you want to schedule EventListener pod to a tainted node. + +```yaml +spec: + podTemplate: + tolerations: + - key: key + value: value + operator: Equal + effect: NoSchedule +``` + ### Logging EventListener sinks are exposed as Kubernetes services that are backed by a Pod @@ -360,9 +380,8 @@ spec: ### Bitbucket Interceptors -Bitbucket Interceptors contain logic to validate and filter webhooks that come from -Bitbucket server or cloud. 
Supported features include validating webhooks actually came from Bitbucket as well as -filtering incoming events. +The Bitbucket interceptor provides support for hooks originating in [Bitbucket server](https://confluence.atlassian.com/bitbucketserver), providing server hook signature validation and event-filtering. +[Bitbucket cloud](https://support.atlassian.com/bitbucket-cloud/) is not currently supported by this interceptor, as it has no secret validation, so you could match on the incoming requests using the CEL interceptor. To use this Interceptor as a validator, create a secret string using the method of your choice, and configure the Bitbucket webhook to use that secret value. diff --git a/vendor/github.com/tektoncd/triggers/docs/triggerbindings.md b/vendor/github.com/tektoncd/triggers/docs/triggerbindings.md index 63a900e344e..241317e7224 100644 --- a/vendor/github.com/tektoncd/triggers/docs/triggerbindings.md +++ b/vendor/github.com/tektoncd/triggers/docs/triggerbindings.md @@ -45,14 +45,14 @@ case-insensitive. These are all valid expressions: -```shell script +```shell $(body.key1) $(.body.key) ``` These are invalid expressions: -```shell script +```shell .body.key1 # INVALID - Not wrapped in $() $({body) # INVALID - Ending curly brace absent ``` @@ -76,7 +76,7 @@ $(body.tekton\.dev) -> "triggers" ### Examples -```shell script +```shell `$(body)` is replaced by the entire body. @@ -162,3 +162,51 @@ spec: template: name: pipeline-template ``` + +## Debugging + +### Evaluating TriggerBindings + +As a convenience, the `binding-eval` tool allows you to evaluate TriggerBindings +for a given HTTP request to determine what the resulting parameters would be +during trigger execution. 
+ +```sh +$ cat testdata/triggerbinding.yaml +apiVersion: tekton.dev/v1alpha1 +kind: TriggerBinding +metadata: + name: pipeline-binding +spec: + params: + - name: foo + value: $(body.test) + - name: bar + value: $(header.X-Header) + +$ cat testdata/http.txt +POST /foo HTTP/1.1 +Content-Length: 16 +Content-Type: application/json +X-Header: tacocat + +{"test": "body"} + +$ binding-eval -b testdata/triggerbinding.yaml -r testdata/http.txt +[ + { + "name": "foo", + "value": "body" + }, + { + "name": "bar", + "value": "tacocat" + } +] +``` + +To install, run: + +```sh +$ go get -u github.com/tektoncd/triggers/cmd/binding-eval +``` diff --git a/vendor/github.com/tektoncd/triggers/docs/triggertemplates.md b/vendor/github.com/tektoncd/triggers/docs/triggertemplates.md index e36e5e13253..9685f20d275 100644 --- a/vendor/github.com/tektoncd/triggers/docs/triggertemplates.md +++ b/vendor/github.com/tektoncd/triggers/docs/triggertemplates.md @@ -47,9 +47,9 @@ spec: type: git params: - name: revision - value: $(params.gitrevision) + value: $(tt.params.gitrevision) - name: url - value: $(params.gitrepositoryurl) + value: $(tt.params.gitrepositoryurl) ``` `TriggerTemplates` currently support the following [Tekton Pipelines](https://github.com/tektoncd/pipelines) resources: @@ -102,11 +102,11 @@ have an optional `description` and `default` value. substitution syntax, where `` is the name of the parameter: ```YAML -$(params.) +$(tt.params.) ``` -`params` can be referenced in the `resourceTemplates` section of a -`TriggerTemplate`. The purpose of `params` is to make `TriggerTemplates` +`tt.params` can be referenced in the `resourceTemplates` section of a +`TriggerTemplate`. The purpose of `tt.params` is to make `TriggerTemplates` reusable. 
## Best Practices diff --git a/vendor/github.com/tektoncd/triggers/examples/bitbucket/triggertemplate.yaml b/vendor/github.com/tektoncd/triggers/examples/bitbucket/triggertemplate.yaml index a910ad38001..4fa420338fa 100644 --- a/vendor/github.com/tektoncd/triggers/examples/bitbucket/triggertemplate.yaml +++ b/vendor/github.com/tektoncd/triggers/examples/bitbucket/triggertemplate.yaml @@ -30,6 +30,6 @@ spec: type: git params: - name: revision - value: $(params.gitrevision) + value: $(tt.params.gitrevision) - name: url - value: $(params.gitrepositoryurl) \ No newline at end of file + value: $(tt.params.gitrepositoryurl) \ No newline at end of file diff --git a/vendor/github.com/tektoncd/triggers/examples/eventlisteners/eventlistener-tolerations.yaml b/vendor/github.com/tektoncd/triggers/examples/eventlisteners/eventlistener-tolerations.yaml new file mode 100644 index 00000000000..44959e5d237 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/examples/eventlisteners/eventlistener-tolerations.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: triggers.tekton.dev/v1alpha1 +kind: EventListener +metadata: + name: listener-tolerations +spec: + serviceAccountName: tekton-triggers-example-sa + podTemplate: + tolerations: + - key: key + value: value + operator: Equal + effect: NoSchedule + triggers: + - name: foo-trig + bindings: + - ref: pipeline-binding + - ref: message-binding + template: + name: pipeline-template diff --git a/vendor/github.com/tektoncd/triggers/examples/github/triggertemplate.yaml b/vendor/github.com/tektoncd/triggers/examples/github/triggertemplate.yaml index 6f1aa88ba62..56c8bbc8b06 100644 --- a/vendor/github.com/tektoncd/triggers/examples/github/triggertemplate.yaml +++ b/vendor/github.com/tektoncd/triggers/examples/github/triggertemplate.yaml @@ -32,4 +32,4 @@ spec: - name: revision value: $(params.gitrevision) - name: url - value: $(params.gitrepositoryurl) + value: $(tt.params.gitrepositoryurl) diff --git 
a/vendor/github.com/tektoncd/triggers/examples/gitlab/gitlab-push-listener.yaml b/vendor/github.com/tektoncd/triggers/examples/gitlab/gitlab-push-listener.yaml index a179d875e5d..1705b6a1999 100644 --- a/vendor/github.com/tektoncd/triggers/examples/gitlab/gitlab-push-listener.yaml +++ b/vendor/github.com/tektoncd/triggers/examples/gitlab/gitlab-push-listener.yaml @@ -29,9 +29,9 @@ spec: type: git params: - name: revision - value: $(params.gitrevision) + value: $(tt.params.gitrevision) - name: url - value: $(params.gitrepositoryurl) + value: $(tt.params.gitrepositoryurl) --- apiVersion: triggers.tekton.dev/v1alpha1 kind: TriggerBinding diff --git a/vendor/github.com/tektoncd/triggers/examples/triggertemplates/triggertemplate.yaml b/vendor/github.com/tektoncd/triggers/examples/triggertemplates/triggertemplate.yaml index 47ebaeed47b..92962cc94d1 100644 --- a/vendor/github.com/tektoncd/triggers/examples/triggertemplates/triggertemplate.yaml +++ b/vendor/github.com/tektoncd/triggers/examples/triggertemplates/triggertemplate.yaml @@ -33,6 +33,6 @@ spec: type: git params: - name: revision - value: $(params.gitrevision) + value: $(tt.params.gitrevision) - name: url - value: $(params.gitrepositoryurl) + value: $(tt.params.gitrepositoryurl) diff --git a/vendor/github.com/tektoncd/triggers/go.mod b/vendor/github.com/tektoncd/triggers/go.mod index 0a5f95a0236..b3f7e4ebcb9 100644 --- a/vendor/github.com/tektoncd/triggers/go.mod +++ b/vendor/github.com/tektoncd/triggers/go.mod @@ -14,7 +14,9 @@ require ( github.com/gorilla/mux v1.7.3 github.com/grpc-ecosystem/grpc-gateway v1.13.0 // indirect github.com/openzipkin/zipkin-go v0.2.2 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect + github.com/spf13/cobra v0.0.5 github.com/tektoncd/pipeline v0.11.3 github.com/tektoncd/plumbing v0.0.0-20200430135134-e53521e1d887 github.com/tidwall/gjson v1.3.5 // indirect diff --git a/vendor/github.com/tektoncd/triggers/go.sum 
b/vendor/github.com/tektoncd/triggers/go.sum index c05603ff5d7..7f75cfee4ae 100644 --- a/vendor/github.com/tektoncd/triggers/go.sum +++ b/vendor/github.com/tektoncd/triggers/go.sum @@ -89,7 +89,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= @@ -131,6 +130,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= @@ -156,7 +156,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -199,7 +198,6 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -275,6 +273,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/gofork 
v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ -379,8 +378,9 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -395,7 +395,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ 
-436,6 +435,7 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -456,7 +456,6 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tektoncd/pipeline v0.11.3 h1:OY7uaW784XgA3F9Ee/wWyd67/Ixt6jPCiXWCao9v0HI= github.com/tektoncd/pipeline v0.11.3/go.mod h1:hlkH32S92+/UODROH0dmxzyuMxfRFp/Nc3e29MewLn8= -github.com/tektoncd/plumbing v0.0.0-20200217163359-cd0db6e567d2 h1:BksmpUwtap3THXJ8Z4KGcotsvpRdFQKySjDHgtc22lA= github.com/tektoncd/plumbing v0.0.0-20200217163359-cd0db6e567d2/go.mod h1:QZHgU07PRBTRF6N57w4+ApRu8OgfYLFNqCDlfEZaD9Y= github.com/tektoncd/plumbing v0.0.0-20200430135134-e53521e1d887 h1:crv70CBAJ2gZFSbf13aRVwdbjR2GYwTms/ZEok/SnFM= github.com/tektoncd/plumbing v0.0.0-20200430135134-e53521e1d887/go.mod h1:cZPJIeTIoP7UPTxQyTQLs7VE1TiXJSNj0te+If4Q+jI= @@ -516,7 +515,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -573,7 +571,6 @@ golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -619,7 +616,6 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= 
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -672,7 +668,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -706,7 +701,6 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03 h1:4HYDjxeNXAOTv3o1N2tjo8UUSlhQgAD52FVkwxnWgM8= google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -721,7 +715,6 @@ google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= @@ -758,7 +751,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -790,7 +782,6 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= @@ -814,7 +805,6 @@ 
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go index b591079b114..2047b65022a 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go @@ -57,6 +57,11 @@ type EventListenerSpec struct { ServiceAccountName string `json:"serviceAccountName"` Triggers []EventListenerTrigger `json:"triggers"` ServiceType corev1.ServiceType `json:"serviceType,omitempty"` + PodTemplate PodTemplate `json:"podTemplate,omitempty"` +} + +type PodTemplate struct { + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // EventListenerTrigger represents a connection between TriggerBinding, Params, @@ -86,7 +91,7 @@ type EventInterceptor struct { GitHub *GitHubInterceptor `json:"github,omitempty"` GitLab *GitLabInterceptor `json:"gitlab,omitempty"` CEL *CELInterceptor `json:"cel,omitempty"` - Bitbucket *BitBucketInterceptor `json:"bitbucket,omitempty"` + Bitbucket *BitbucketInterceptor `json:"bitbucket,omitempty"` } // WebhookInterceptor provides a webhook to 
intercept and pre-process events @@ -101,8 +106,8 @@ type WebhookInterceptor struct { Header []v1beta1.Param `json:"header,omitempty"` } -// BitBucketInterceptor provides a webhook to intercept and pre-process events -type BitBucketInterceptor struct { +// BitbucketInterceptor provides a webhook to intercept and pre-process events +type BitbucketInterceptor struct { SecretRef *SecretRef `json:"secretRef,omitempty"` EventTypes []string `json:"eventTypes,omitempty"` } diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go index 05f1b4f8140..14c5d0e4217 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation_test.go @@ -349,7 +349,7 @@ func TestEventListenerValidate_error(t *testing.T) { Interceptors: []*v1alpha1.EventInterceptor{{ GitHub: &v1alpha1.GitHubInterceptor{}, GitLab: &v1alpha1.GitLabInterceptor{}, - Bitbucket: &v1alpha1.BitBucketInterceptor{}, + Bitbucket: &v1alpha1.BitbucketInterceptor{}, }}, }}, }, diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go index 765f26f3f33..0698281fed6 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go @@ -28,8 +28,8 @@ import ( "knative.dev/pkg/apis" ) -// paramsRegexp captures TriggerTemplate parameter names $(params.NAME) -var paramsRegexp = regexp.MustCompile(`\$\(params.(?P[_a-zA-Z][_a-zA-Z0-9.-]*)\)`) +// paramsRegexp captures TriggerTemplate parameter names $(tt.params.NAME) or $(params.NAME) +var paramsRegexp = 
regexp.MustCompile(`\$\((params|tt.params).([_a-zA-Z][_a-zA-Z0-9.-]*)\)`) // Validate validates a TriggerTemplate. func (t *TriggerTemplate) Validate(ctx context.Context) *apis.FieldError { @@ -91,16 +91,24 @@ func verifyParamDeclarations(params []ParamSpec, templates []TriggerResourceTemp declaredParamNames[param.Name] = struct{}{} } for i, template := range templates { - // Get all params in the template $(params.NAME) + // Get all params in the template $(tt.params.NAME) or $(params.NAME) templateParams := paramsRegexp.FindAllSubmatch(template.RawExtension.Raw, -1) for _, templateParam := range templateParams { - templateParamName := string(templateParam[1]) + templateParamName := string(templateParam[2]) if _, ok := declaredParamNames[templateParamName]; !ok { + // This logic is to get the tag and display error dynamically for both tt.params and params. + // TODO(#606) + var tag string + if string(templateParam[1]) == "params" { + tag = "params" + } else { + tag = "tt.params" + } fieldErr := apis.ErrInvalidValue( - fmt.Sprintf("undeclared param '$(params.%s)'", templateParamName), + fmt.Sprintf("undeclared param '$(%s.%s)'", tag, templateParamName), fmt.Sprintf("[%d]", i), ) - fieldErr.Details = fmt.Sprintf("'$(params.%s)' must be declared in spec.params", templateParamName) + fieldErr.Details = fmt.Sprintf("'$(%s.%s)' must be declared in spec.params", tag, templateParamName) return fieldErr } } diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation_test.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation_test.go index 7c4a80f8923..dc9fff600d0 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation_test.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation_test.go @@ -36,8 +36,17 @@ var v1beta1ResourceTemplate = runtime.RawExtension{ Raw: 
[]byte(`{"kind":"PipelineRun","apiVersion":"tekton.dev/v1beta1","metadata":{"creationTimestamp":null},"spec":{},"status":{}}`), } var paramResourceTemplate = runtime.RawExtension{ + Raw: []byte(`{"kind":"PipelineRun","apiVersion":"tekton.dev/v1alpha1","metadata":{"creationTimestamp":null},"spec": "$(tt.params.foo)","status":{}}`), +} +var deprecatedParamResourceTemplate = runtime.RawExtension{ Raw: []byte(`{"kind":"PipelineRun","apiVersion":"tekton.dev/v1alpha1","metadata":{"creationTimestamp":null},"spec": "$(params.foo)","status":{}}`), } +var invalidParamResourceTemplate = runtime.RawExtension{ + Raw: []byte(`{"kind":"PipelineRun","apiVersion":"tekton.dev/v1alpha1","metadata":{"creationTimestamp":null},"spec": "$(.foo)","status":{}}`), +} +var bothParamResourceTemplate = runtime.RawExtension{ + Raw: []byte(`{"kind":"PipelineRun","apiVersion":"tekton.dev/v1alpha1","metadata":{"creationTimestamp":null},"spec": {"$(params1.foo)", "$(params.bar)", "$(tt.params.baz)"},"status":{}}`), +} func TestTriggerTemplate_Validate(t *testing.T) { tcs := []struct { @@ -130,14 +139,43 @@ func TestTriggerTemplate_Validate(t *testing.T) { b.TriggerResourceTemplate(paramResourceTemplate))), want: nil, }, { - name: "params used in resource template are not declared", + name: "tt.params used in resource template are not declared", template: b.TriggerTemplate("tt", "foo", b.TriggerTemplateSpec( b.TriggerResourceTemplate(paramResourceTemplate))), + want: &apis.FieldError{ + Message: "invalid value: undeclared param '$(tt.params.foo)'", + Paths: []string{"spec.resourcetemplates[0]"}, + Details: "'$(tt.params.foo)' must be declared in spec.params", + }, + }, { + name: "params used in resource template are not declared", + template: b.TriggerTemplate("tt", "foo", b.TriggerTemplateSpec( + b.TriggerResourceTemplate(deprecatedParamResourceTemplate))), want: &apis.FieldError{ Message: "invalid value: undeclared param '$(params.foo)'", Paths: []string{"spec.resourcetemplates[0]"}, Details: 
"'$(params.foo)' must be declared in spec.params", }, + }, { + name: "both params and tt.params used in resource template are not declared", + template: b.TriggerTemplate("tt", "foo", b.TriggerTemplateSpec( + b.TriggerResourceTemplate(bothParamResourceTemplate))), + want: &apis.FieldError{ + Message: "invalid value: undeclared param '$(params.bar)'", + Paths: []string{"spec.resourcetemplates[0]"}, + Details: "'$(params.bar)' must be declared in spec.params", + }, + }, { + name: "invalid params used in resource template are not declared", + template: b.TriggerTemplate("tt", "foo", b.TriggerTemplateSpec( + b.TriggerResourceTemplate(invalidParamResourceTemplate))), + want: nil, + }, { + name: "invalid params used in resource template are declared", + template: b.TriggerTemplate("tt", "foo", b.TriggerTemplateSpec( + b.TriggerTemplateParam("foo", "desc", "val"), + b.TriggerResourceTemplate(invalidParamResourceTemplate))), + want: nil, }} for _, tc := range tcs { diff --git a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go index 09a021b907c..603cc118da4 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go @@ -27,7 +27,7 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BitBucketInterceptor) DeepCopyInto(out *BitBucketInterceptor) { +func (in *BitbucketInterceptor) DeepCopyInto(out *BitbucketInterceptor) { *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef @@ -42,12 +42,12 @@ func (in *BitBucketInterceptor) DeepCopyInto(out *BitBucketInterceptor) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitBucketInterceptor. 
-func (in *BitBucketInterceptor) DeepCopy() *BitBucketInterceptor { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketInterceptor. +func (in *BitbucketInterceptor) DeepCopy() *BitbucketInterceptor { if in == nil { return nil } - out := new(BitBucketInterceptor) + out := new(BitbucketInterceptor) in.DeepCopyInto(out) return out } @@ -175,7 +175,7 @@ func (in *EventInterceptor) DeepCopyInto(out *EventInterceptor) { } if in.Bitbucket != nil { in, out := &in.Bitbucket, &out.Bitbucket - *out = new(BitBucketInterceptor) + *out = new(BitbucketInterceptor) (*in).DeepCopyInto(*out) } return @@ -299,6 +299,7 @@ func (in *EventListenerSpec) DeepCopyInto(out *EventListenerSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.PodTemplate.DeepCopyInto(&out.PodTemplate) return } @@ -480,6 +481,29 @@ func (in *ParamSpec) DeepCopy() *ParamSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. +func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SecretRef) DeepCopyInto(out *SecretRef) { *out = *in diff --git a/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket.go b/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket.go index 90032b1c3a5..b44f399c69c 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket.go +++ b/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket.go @@ -33,11 +33,11 @@ import ( type Interceptor struct { KubeClientSet kubernetes.Interface Logger *zap.SugaredLogger - Bitbucket *triggersv1.BitBucketInterceptor + Bitbucket *triggersv1.BitbucketInterceptor EventListenerNamespace string } -func NewInterceptor(bh *triggersv1.BitBucketInterceptor, k kubernetes.Interface, ns string, l *zap.SugaredLogger) interceptors.Interceptor { +func NewInterceptor(bh *triggersv1.BitbucketInterceptor, k kubernetes.Interface, ns string, l *zap.SugaredLogger) interceptors.Interceptor { return &Interceptor{ Logger: l, Bitbucket: bh, diff --git a/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket_test.go b/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket_test.go index 8dafe97b496..c32c28dbdbb 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket_test.go +++ b/vendor/github.com/tektoncd/triggers/pkg/interceptors/bitbucket/bitbucket_test.go @@ -42,14 +42,14 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { } tests := []struct { name string - Bitbucket *triggersv1.BitBucketInterceptor + Bitbucket *triggersv1.BitbucketInterceptor args args want []byte wantErr bool }{ { name: "no secret", - Bitbucket: &triggersv1.BitBucketInterceptor{}, + Bitbucket: &triggersv1.BitbucketInterceptor{}, args: args{ payload: ioutil.NopCloser(bytes.NewBufferString("somepayload")), signature: "foo", @@ -59,7 +59,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "invalid header for secret", - Bitbucket: 
&triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ SecretRef: &triggersv1.SecretRef{ SecretName: "mysecret", SecretKey: "token", @@ -81,7 +81,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "valid header for secret", - Bitbucket: &triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ SecretRef: &triggersv1.SecretRef{ SecretName: "mysecret", SecretKey: "token", @@ -106,7 +106,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "matching event", - Bitbucket: &triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ EventTypes: []string{"pr:opened", "repo:refs_changed"}, }, args: args{ @@ -118,7 +118,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "no matching event", - Bitbucket: &triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ EventTypes: []string{"pr:opened", "repo:refs_changed"}, }, args: args{ @@ -129,7 +129,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "valid header for secret and matching event", - Bitbucket: &triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ SecretRef: &triggersv1.SecretRef{ SecretName: "mysecret", SecretKey: "token", @@ -156,7 +156,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "valid header for secret, but no matching event", - Bitbucket: &triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ SecretRef: &triggersv1.SecretRef{ SecretName: "mysecret", SecretKey: "token", @@ -182,7 +182,7 @@ func TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { }, { name: "invalid header for secret, but matching event", - Bitbucket: &triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ SecretRef: &triggersv1.SecretRef{ SecretName: "mysecret", SecretKey: "token", @@ -205,7 +205,7 @@ func 
TestInterceptor_ExecuteTrigger_Signature(t *testing.T) { wantErr: true, }, { name: "nil body does not panic", - Bitbucket: &triggersv1.BitBucketInterceptor{}, + Bitbucket: &triggersv1.BitbucketInterceptor{}, args: args{ payload: nil, signature: "foo", diff --git a/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go b/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go index c5055114ebf..b31fedd98a3 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go +++ b/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go @@ -319,6 +319,7 @@ func (c *Reconciler) reconcileDeployment(el *v1alpha1.EventListener) error { Labels: labels, }, Spec: corev1.PodSpec{ + Tolerations: el.Spec.PodTemplate.Tolerations, ServiceAccountName: el.Spec.ServiceAccountName, Containers: []corev1.Container{container}, @@ -358,6 +359,10 @@ func (c *Reconciler) reconcileDeployment(el *v1alpha1.EventListener) error { existingDeployment.Spec.Template.Spec.ServiceAccountName = deployment.Spec.Template.Spec.ServiceAccountName updated = true } + if !reflect.DeepEqual(existingDeployment.Spec.Template.Spec.Tolerations, deployment.Spec.Template.Spec.Tolerations) { + existingDeployment.Spec.Template.Spec.Tolerations = deployment.Spec.Template.Spec.Tolerations + updated = true + } if len(existingDeployment.Spec.Template.Spec.Containers) == 0 || len(existingDeployment.Spec.Template.Spec.Containers) > 1 { existingDeployment.Spec.Template.Spec.Containers = []corev1.Container{container} diff --git a/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go b/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go index ffd48142bd8..a0d61ac3430 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go +++ 
b/vendor/github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener/eventlistener_test.go @@ -50,6 +50,11 @@ func init() { eventListener0 = bldr.EventListener(eventListenerName, namespace, bldr.EventListenerSpec( bldr.EventListenerServiceAccount("sa"), + bldr.EventListenerPodTemplate( + bldr.EventListenerPodTemplateSpec( + bldr.EventListenerPodTemplateTolerations(nil), + ), + ), ), bldr.EventListenerStatus( bldr.EventListenerConfig(generatedResourceName), @@ -76,9 +81,17 @@ var ( Name: reconcilerNamespace, }, } - reconcileKey = fmt.Sprintf("%s/%s", namespace, eventListenerName) - updateLabel = map[string]string{"update": "true"} - updatedSa = "updatedSa" + reconcileKey = fmt.Sprintf("%s/%s", namespace, eventListenerName) + updateLabel = map[string]string{"update": "true"} + updatedSa = "updatedSa" + updateTolerations = []corev1.Toleration{ + { + Key: "key", + Operator: "Equal", + Value: "value", + Effect: "NoSchedule", + }, + } deploymentAvailableCondition = appsv1.DeploymentCondition{ Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue, @@ -266,6 +279,9 @@ func Test_reconcileDeployment(t *testing.T) { eventListener4 := eventListener1.DeepCopy() eventListener4.Spec.ServiceAccountName = updatedSa + eventListener5 := eventListener1.DeepCopy() + eventListener5.Spec.PodTemplate.Tolerations = updateTolerations + var replicas int32 = 1 // deployment1 == initial deployment deployment1 := &appsv1.Deployment{ @@ -280,6 +296,7 @@ func Test_reconcileDeployment(t *testing.T) { Labels: generatedLabels, }, Spec: corev1.PodSpec{ + Tolerations: eventListener0.Spec.PodTemplate.Tolerations, ServiceAccountName: eventListener0.Spec.ServiceAccountName, Containers: []corev1.Container{ { @@ -373,6 +390,9 @@ func Test_reconcileDeployment(t *testing.T) { deployment4 := deployment1.DeepCopy() deployment4.Spec.Template.Spec.ServiceAccountName = updatedSa + deployment5 := deployment1.DeepCopy() + deployment5.Spec.Template.Spec.Tolerations = updateTolerations + 
deploymentMissingVolumes := deployment1.DeepCopy() deploymentMissingVolumes.Spec.Template.Spec.Volumes = nil deploymentMissingVolumes.Spec.Template.Spec.Containers[0].VolumeMounts = nil @@ -458,7 +478,21 @@ func Test_reconcileDeployment(t *testing.T) { EventListeners: []*v1alpha1.EventListener{eventListener4}, Deployments: []*appsv1.Deployment{deployment4}, }, - }, { + }, + { + name: "eventlistener-tolerations-update", + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{eventListener5}, + Deployments: []*appsv1.Deployment{deployment1}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{eventListener5}, + Deployments: []*appsv1.Deployment{deployment5}, + }, + }, + { name: "eventlistener-config-volume-mount-update", startResources: test.Resources{ Namespaces: []*corev1.Namespace{namespaceResource}, @@ -512,6 +546,11 @@ func TestReconcile(t *testing.T) { eventListener1 := bldr.EventListener(eventListenerName, namespace, bldr.EventListenerSpec( bldr.EventListenerServiceAccount("sa"), + bldr.EventListenerPodTemplate( + bldr.EventListenerPodTemplateSpec( + bldr.EventListenerPodTemplateTolerations(nil), + ), + ), ), bldr.EventListenerStatus( bldr.EventListenerConfig(generatedResourceName), @@ -550,6 +589,9 @@ func TestReconcile(t *testing.T) { eventListener4 := eventListener3.DeepCopy() eventListener4.Spec.ServiceType = corev1.ServiceTypeNodePort + eventListener5 := eventListener2.DeepCopy() + eventListener5.Spec.PodTemplate.Tolerations = updateTolerations + var replicas int32 = 1 deployment1 := &appsv1.Deployment{ ObjectMeta: generateObjectMeta(eventListener0), @@ -563,6 +605,7 @@ func TestReconcile(t *testing.T) { Labels: generatedLabels, }, Spec: corev1.PodSpec{ + Tolerations: eventListener0.Spec.PodTemplate.Tolerations, ServiceAccountName: eventListener0.Spec.ServiceAccountName, Containers: 
[]corev1.Container{{ Name: "event-listener", @@ -640,6 +683,9 @@ func TestReconcile(t *testing.T) { deployment3 := deployment2.DeepCopy() deployment3.Spec.Template.Spec.ServiceAccountName = updatedSa + deployment4 := deployment2.DeepCopy() + deployment4.Spec.Template.Spec.Tolerations = updateTolerations + service1 := &corev1.Service{ ObjectMeta: generateObjectMeta(eventListener0), Spec: corev1.ServiceSpec{ @@ -725,6 +771,22 @@ func TestReconcile(t *testing.T) { Services: []*corev1.Service{service2}, ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, }, + }, { + name: "update-eventlistener-tolerations", + key: reconcileKey, + startResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{eventListener5}, + Deployments: []*appsv1.Deployment{deployment2}, + Services: []*corev1.Service{service2}, + }, + endResources: test.Resources{ + Namespaces: []*corev1.Namespace{namespaceResource}, + EventListeners: []*v1alpha1.EventListener{eventListener5}, + Deployments: []*appsv1.Deployment{deployment4}, + Services: []*corev1.Service{service2}, + ConfigMaps: []*corev1.ConfigMap{loggingConfigMap}, + }, }, { name: "update-eventlistener-servicetype", key: reconcileKey, diff --git a/vendor/github.com/tektoncd/triggers/pkg/resources/create.go b/vendor/github.com/tektoncd/triggers/pkg/resources/create.go index b0a6d424179..b5fafbea34e 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/resources/create.go +++ b/vendor/github.com/tektoncd/triggers/pkg/resources/create.go @@ -40,11 +40,12 @@ func FindAPIResource(apiVersion, kind string, c discoveryclient.ServerResourcesI if err != nil { return nil, fmt.Errorf("error getting kubernetes server resources for apiVersion %s: %s", apiVersion, err) } - for _, apiResource := range resourceList.APIResources { - if apiResource.Kind != kind { + for i := range resourceList.APIResources { + r := &resourceList.APIResources[i] + if r.Kind != kind { continue } - r := &apiResource + // 
Resolve GroupVersion from parent list to have consistent resource identifiers. if r.Version == "" || r.Group == "" { gv, err := schema.ParseGroupVersion(resourceList.GroupVersion) diff --git a/vendor/github.com/tektoncd/triggers/pkg/sink/sink_test.go b/vendor/github.com/tektoncd/triggers/pkg/sink/sink_test.go index b1550d41a2a..4f6a1b2a30a 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/sink/sink_test.go +++ b/vendor/github.com/tektoncd/triggers/pkg/sink/sink_test.go @@ -153,7 +153,7 @@ func TestHandleEvent(t *testing.T) { Kind: "PipelineResource", }, ObjectMeta: metav1.ObjectMeta{ - Name: "$(params.name)", + Name: "$(tt.params.name)", Namespace: namespace, Labels: map[string]string{ "app": "$(params.foo)", @@ -273,7 +273,7 @@ func TestHandleEventWithInterceptors(t *testing.T) { Type: pipelinev1alpha1.PipelineResourceTypeGit, Params: []pipelinev1alpha1.ResourceParam{{ Name: "url", - Value: "$(params.url)", + Value: "$(tt.params.url)", }}, }, } @@ -413,7 +413,7 @@ func TestHandleEventWithWebhookInterceptors(t *testing.T) { Kind: "PipelineResource", }, ObjectMeta: metav1.ObjectMeta{ - Name: "$(params.name)", + Name: "$(tt.params.name)", Namespace: namespace, }, Spec: pipelinev1alpha1.PipelineResourceSpec{ @@ -898,7 +898,7 @@ func TestHandleEventWithInterceptorsAndTriggerAuth(t *testing.T) { } -func TestHandleEventWithBitBucketInterceptors(t *testing.T) { +func TestHandleEventWithBitbucketInterceptors(t *testing.T) { eventBody := json.RawMessage(`{"repository": {"links": {"clone": [{"href": "testurl", "name": "ssh"}, {"href": "testurl", "name": "http"}]}}, "changes": [{"ref": {"displayId": "test-branch"}}]}`) tb, tt := getResources(t, "$(body.repository.links.clone[1].href)") @@ -924,7 +924,7 @@ func TestHandleEventWithBitBucketInterceptors(t *testing.T) { Bindings: []*triggersv1.EventListenerBinding{{Name: "tb", Kind: "TriggerBinding"}}, Template: triggersv1.EventListenerTemplate{Name: "tt"}, Interceptors: []*triggersv1.EventInterceptor{{ - Bitbucket: 
&triggersv1.BitBucketInterceptor{ + Bitbucket: &triggersv1.BitbucketInterceptor{ SecretRef: &triggersv1.SecretRef{ SecretKey: "secretKey", SecretName: "secret", @@ -1001,7 +1001,7 @@ func getResources(t *testing.T, triggerBindingParam string) (*v1alpha1.TriggerBi Type: pipelinev1.PipelineResourceTypeGit, Params: []pipelinev1.ResourceParam{{ Name: "url", - Value: "$(params.url)", + Value: "$(tt.params.url)", }}, }, } diff --git a/vendor/github.com/tektoncd/triggers/pkg/template/event.go b/vendor/github.com/tektoncd/triggers/pkg/template/event.go index 42235ee6759..463268c5db0 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/template/event.go +++ b/vendor/github.com/tektoncd/triggers/pkg/template/event.go @@ -38,7 +38,12 @@ func ResolveParams(rt ResolvedTrigger, body []byte, header http.Header) ([]trigg return nil, fmt.Errorf("failed to ApplyEventValuesToParams: %w", err) } - return MergeInDefaultParams(out, rt.TriggerTemplate.Spec.Params), nil + var ttParams []triggersv1.ParamSpec + if rt.TriggerTemplate != nil { + ttParams = rt.TriggerTemplate.Spec.Params + } + + return MergeInDefaultParams(out, ttParams), nil } // ResolveResources resolves a templated resource by replacing params with their values. 
diff --git a/vendor/github.com/tektoncd/triggers/pkg/template/event_test.go b/vendor/github.com/tektoncd/triggers/pkg/template/event_test.go index 5e897684ec2..35e81689e8e 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/template/event_test.go +++ b/vendor/github.com/tektoncd/triggers/pkg/template/event_test.go @@ -395,8 +395,8 @@ func TestResolveResources(t *testing.T) { template: bldr.TriggerTemplate("tt", ns, bldr.TriggerTemplateSpec( bldr.TriggerTemplateParam("p1", "desc", ""), bldr.TriggerTemplateParam("p2", "desc", ""), - bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt1": "$(params.p1)-$(params.p2)"}`)}), - bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt2": "$(params.p1)-$(params.p2)"}`)}), + bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt1": "$(tt.params.p1)-$(tt.params.p2)"}`)}), + bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt2": "$(tt.params.p1)-$(tt.params.p2)"}`)}), )), params: []triggersv1.Param{ bldr.Param("p1", "val1"), @@ -410,7 +410,7 @@ func TestResolveResources(t *testing.T) { name: "replace JSON string in templates", template: bldr.TriggerTemplate("tt", ns, bldr.TriggerTemplateSpec( bldr.TriggerTemplateParam("p1", "desc", ""), - bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt1": "$(params.p1)"}`)}), + bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt1": "$(tt.params.p1)"}`)}), )), params: []triggersv1.Param{ bldr.Param("p1", `{"a": "b"}`), @@ -423,7 +423,7 @@ func TestResolveResources(t *testing.T) { name: "replace JSON string with special chars in templates", template: bldr.TriggerTemplate("tt", ns, bldr.TriggerTemplateSpec( bldr.TriggerTemplateParam("p1", "desc", ""), - bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt1": "$(params.p1)"}`)}), + bldr.TriggerResourceTemplate(runtime.RawExtension{Raw: []byte(`{"rt1": "$(tt.params.p1)"}`)}), )), params: []triggersv1.Param{ bldr.Param("p1", `{"a": 
"v\\r\\n烈"}`), diff --git a/vendor/github.com/tektoncd/triggers/pkg/template/resource.go b/vendor/github.com/tektoncd/triggers/pkg/template/resource.go index 2f27dea271c..8342fde06d6 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/template/resource.go +++ b/vendor/github.com/tektoncd/triggers/pkg/template/resource.go @@ -110,12 +110,17 @@ func ApplyParamsToResourceTemplate(params []triggersv1.Param, rt json.RawMessage // applyParamToResourceTemplate returns the TriggerResourceTemplate with the // param value substituted for all matching param variables in the template func applyParamToResourceTemplate(param triggersv1.Param, rt json.RawMessage) json.RawMessage { - // Assume the param is valid - paramVariable := fmt.Sprintf("$(params.%s)", param.Name) - // Escape quotes so that that JSON strings can be appended to regular strings. - // See #257 for discussion on this behavior. - paramValue := strings.Replace(param.Value, `"`, `\"`, -1) - return bytes.Replace(rt, []byte(paramVariable), []byte(paramValue), -1) + // The changes are for backward compatibility with both $(params) and $(tt.params) + // TODO(#606) + for _, tag := range []string{"params", "tt.params"} { + // Assume the param is valid + paramVariable := fmt.Sprintf("$(%s.%s)", tag, param.Name) + // Escape quotes so that that JSON strings can be appended to regular strings. + // See #257 for discussion on this behavior. + paramValue := strings.Replace(param.Value, `"`, `\"`, -1) + rt = bytes.Replace(rt, []byte(paramVariable), []byte(paramValue), -1) + } + return rt } // UID generates a random string like the Kubernetes apiserver generateName metafield postfix. 
diff --git a/vendor/github.com/tektoncd/triggers/pkg/template/resource_test.go b/vendor/github.com/tektoncd/triggers/pkg/template/resource_test.go index 1c191f9487b..57bd394f609 100644 --- a/vendor/github.com/tektoncd/triggers/pkg/template/resource_test.go +++ b/vendor/github.com/tektoncd/triggers/pkg/template/resource_test.go @@ -126,11 +126,11 @@ func Test_applyParamToResourceTemplate(t *testing.T) { } rtNoParamVars = json.RawMessage(`{"foo": "bar"}`) wantRtNoParamVars = json.RawMessage(`{"foo": "bar"}`) - rtNoMatchingParamVars = json.RawMessage(`{"foo": "$(params.no.matching.path)"}`) - wantRtNoMatchingParamVars = json.RawMessage(`{"foo": "$(params.no.matching.path)"}`) - rtOneParamVar = json.RawMessage(`{"foo": "bar-$(params.oneid)-bar"}`) + rtNoMatchingParamVars = json.RawMessage(`{"foo": "$(tt.params.no.matching.path)"}`) + wantRtNoMatchingParamVars = json.RawMessage(`{"foo": "$(tt.params.no.matching.path)"}`) + rtOneParamVar = json.RawMessage(`{"foo": "bar-$(tt.params.oneid)-bar"}`) wantRtOneParamVar = json.RawMessage(`{"foo": "bar-onevalue-bar"}`) - rtMultipleParamVars = json.RawMessage(`{"$(params.oneid)": "bar-$(params.oneid)-$(params.oneid)$(params.oneid)$(params.oneid)-$(params.oneid)-bar"}`) + rtMultipleParamVars = json.RawMessage(`{"$(tt.params.oneid)": "bar-$(tt.params.oneid)-$(tt.params.oneid)$(tt.params.oneid)$(tt.params.oneid)-$(tt.params.oneid)-bar"}`) wantRtMultipleParamVars = json.RawMessage(`{"onevalue": "bar-onevalue-onevalueonevalueonevalue-onevalue-bar"}`) ) type args struct { @@ -180,9 +180,19 @@ func Test_applyParamToResourceTemplate(t *testing.T) { Name: "p1", Value: `{"a":"b"}`, }, - rt: json.RawMessage(`{"foo": "$(params.p1)"}`), + rt: json.RawMessage(`{"foo": "$(tt.params.p1)"}`), }, want: json.RawMessage(`{"foo": "{\"a\":\"b\"}"}`), + }, { + name: "deprecated params in resourcetemplate", + args: args{ + param: triggersv1.Param{ + Name: "p1", + Value: `{"a":"b"}`, + }, + rt: json.RawMessage(`{"p1": "$(params.p1)"}`), + }, + want: 
json.RawMessage(`{"p1": "{\"a\":\"b\"}"}`), }, } for _, tt := range tests { @@ -196,7 +206,10 @@ func Test_applyParamToResourceTemplate(t *testing.T) { } func Test_ApplyParamsToResourceTemplate(t *testing.T) { - rt := json.RawMessage(`{"oneparam": "$(params.oneid)", "twoparam": "$(params.twoid)", "threeparam": "$(params.threeid)"`) + rt := json.RawMessage(`{"oneparam": "$(tt.params.oneid)", "twoparam": "$(tt.params.twoid)", "threeparam": "$(tt.params.threeid)"`) + rt1 := json.RawMessage(`{"deprecatedParam": "$(params.oneid)"`) + rt2 := json.RawMessage(`{"actualParam": "$(tt.params.oneid)", "deprecatedParam": "$(params.twoid)"`) + rt3 := json.RawMessage(`{"actualParam": "$(tt.params.oneid)", "invalidParam": "$(tt.params1.invalidid)", "deprecatedParam": "$(params.twoid)"`) type args struct { params []triggersv1.Param rt json.RawMessage @@ -222,7 +235,7 @@ func Test_ApplyParamsToResourceTemplate(t *testing.T) { }, rt: rt, }, - want: json.RawMessage(`{"oneparam": "onevalue", "twoparam": "$(params.twoid)", "threeparam": "$(params.threeid)"`), + want: json.RawMessage(`{"oneparam": "onevalue", "twoparam": "$(tt.params.twoid)", "threeparam": "$(tt.params.threeid)"`), }, { name: "multiple params", @@ -236,6 +249,39 @@ func Test_ApplyParamsToResourceTemplate(t *testing.T) { }, want: json.RawMessage(`{"oneparam": "onevalue", "twoparam": "twovalue", "threeparam": "threevalue"`), }, + { + name: "deprecated params", + args: args{ + params: []triggersv1.Param{ + {Name: "oneid", Value: "deprecatedParamValue"}, + }, + rt: rt1, + }, + want: json.RawMessage(`{"deprecatedParam": "deprecatedParamValue"`), + }, + { + name: "both params and tt.params together", + args: args{ + params: []triggersv1.Param{ + {Name: "oneid", Value: "actualValue"}, + {Name: "twoid", Value: "deprecatedParamValue"}, + }, + rt: rt2, + }, + want: json.RawMessage(`{"actualParam": "actualValue", "deprecatedParam": "deprecatedParamValue"`), + }, + { + name: "valid and invalid params together", + args: args{ + 
params: []triggersv1.Param{ + {Name: "oneid", Value: "actualValue"}, + {Name: "invalidid", Value: "invalidValue"}, + {Name: "twoid", Value: "deprecatedParamValue"}, + }, + rt: rt3, + }, + want: json.RawMessage(`{"actualParam": "actualValue", "invalidParam": "$(tt.params1.invalidid)", "deprecatedParam": "deprecatedParamValue"`), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/vendor/github.com/tektoncd/triggers/tekton/publish.yaml b/vendor/github.com/tektoncd/triggers/tekton/publish.yaml index 0249b49a15c..b2864306281 100644 --- a/vendor/github.com/tektoncd/triggers/tekton/publish.yaml +++ b/vendor/github.com/tektoncd/triggers/tekton/publish.yaml @@ -56,13 +56,14 @@ spec: value: $(inputs.params.imageRegistry) - name: GOPATH value: /workspace/go - - name: CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE + - name: GOOGLE_APPLICATION_CREDENTIALS value: /secret/release.json script: | #!/bin/sh set -ex - - # Auth with CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE + # Activate service account + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} + # Setup docker-auth gcloud auth configure-docker # ko requires this variable to be set in order to set image creation timestamps correctly https://github.com/google/go-containerregistry/pull/146 @@ -110,12 +111,12 @@ spec: REGIONS=(us eu asia) IMAGES=( - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtControllerImage.url) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtWebhookImage.url) - $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtEventListenerSinkImage.url) + $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtControllerImage.url):$(inputs.params.versionTag) + $(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtWebhookImage.url):$(inputs.params.versionTag) + 
$(inputs.params.imageRegistry)/$(inputs.params.pathToProject)/$(outputs.resources.builtEventListenerSinkImage.url):$(inputs.params.versionTag) ) # Parse the built images from the release.yaml generated by ko - BUILT_IMAGES=( $(/workspace/go/src/github.com/tektoncd/triggers/tekton/koparse/koparse.py --path /workspace/output/bucket/latest/release.yaml --base $(inputs.params.imageRegistry)/$(inputs.params.pathToProject) --images ${IMAGES[@]}) ) + BUILT_IMAGES=( $(/workspace/go/src/github.com/tektoncd/triggers/tekton/koparse/koparse.py --path /workspace/output/bucket/previous/$(inputs.params.versionTag)/release.yaml --base $(inputs.params.imageRegistry)/$(inputs.params.pathToProject) --images ${IMAGES[@]}) ) # Auth with account credentials gcloud auth activate-service-account --key-file=/secret/release.json diff --git a/vendor/github.com/tektoncd/triggers/test/buffer.go b/vendor/github.com/tektoncd/triggers/test/buffer.go new file mode 100644 index 00000000000..2b62f100a11 --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/test/buffer.go @@ -0,0 +1,45 @@ +// +build e2e + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "bytes" + "sync" +) + +// Buffer is the thread safe Buffer implementation +type Buffer struct { + b bytes.Buffer + m sync.Mutex +} + +// Write to the Buffer in a thread safe manner +func (b *Buffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} + +// String returns the contents of the unread portion of the buffer +// as a string in a thread safe manner. +func (b *Buffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} diff --git a/vendor/github.com/tektoncd/triggers/test/builder/eventlistener.go b/vendor/github.com/tektoncd/triggers/test/builder/eventlistener.go index e4d00c62e28..568ea144327 100644 --- a/vendor/github.com/tektoncd/triggers/test/builder/eventlistener.go +++ b/vendor/github.com/tektoncd/triggers/test/builder/eventlistener.go @@ -31,6 +31,9 @@ type EventListenerOp func(*v1alpha1.EventListener) // EventListenerSpecOp is an operation which modifies the EventListenerSpec. type EventListenerSpecOp func(*v1alpha1.EventListenerSpec) +// EventListenerPodTemplateOp is an operation which modifies the PodTemplate. +type EventListenerPodTemplateOp func(*v1alpha1.PodTemplate) + // EventListenerStatusOp is an operation which modifies the EventListenerStatus. type EventListenerStatusOp func(*v1alpha1.EventListenerStatus) @@ -89,6 +92,31 @@ func EventListenerServiceAccount(saName string) EventListenerSpecOp { } } +// EventListenerPodTemplate sets the specified pod template of the EventListener. +func EventListenerPodTemplate(podTemplate v1alpha1.PodTemplate) EventListenerSpecOp { + return func(spec *v1alpha1.EventListenerSpec) { + spec.PodTemplate = podTemplate + } +} + +// EventListenerPodTemplateSpec creates an PodTemplate. +// Any number of EventListenerPodTemplateOp modifiers can be passed to transform it. 
+func EventListenerPodTemplateSpec(ops ...EventListenerPodTemplateOp) v1alpha1.PodTemplate { + pt := v1alpha1.PodTemplate{} + for _, op := range ops { + op(&pt) + } + + return pt +} + +// EventListenerPodTemplateTolerations sets the specified Tolerations of the EventListener PodTemplate. +func EventListenerPodTemplateTolerations(tolerations []corev1.Toleration) EventListenerPodTemplateOp { + return func(pt *v1alpha1.PodTemplate) { + pt.Tolerations = tolerations + } +} + // EventListenerTrigger adds an EventListenerTrigger to the EventListenerSpec Triggers. // Any number of EventListenerTriggerOp modifiers can be passed to create/modify it. func EventListenerTrigger(ttName, apiVersion string, ops ...EventListenerTriggerOp) EventListenerSpecOp { diff --git a/vendor/github.com/tektoncd/triggers/test/eventlistener_scale_test.go b/vendor/github.com/tektoncd/triggers/test/eventlistener_scale_test.go index 30dfc7fd3a7..55293e413a5 100644 --- a/vendor/github.com/tektoncd/triggers/test/eventlistener_scale_test.go +++ b/vendor/github.com/tektoncd/triggers/test/eventlistener_scale_test.go @@ -47,8 +47,20 @@ func TestEventListenerScale(t *testing.T) { // Create an EventListener with 1000 Triggers var err error el := bldr.EventListener("my-eventlistener", namespace, bldr.EventListenerSpec( - bldr.EventListenerServiceAccount(saName)), - ) + bldr.EventListenerServiceAccount(saName), + bldr.EventListenerPodTemplate( + bldr.EventListenerPodTemplateSpec( + bldr.EventListenerPodTemplateTolerations([]corev1.Toleration{ + { + Key: "key", + Operator: "Equal", + Value: "value", + Effect: "NoSchedule", + }, + }), + ), + ), + )) for i := 0; i < 1000; i++ { trigger := bldr.Trigger("my-triggertemplate", "v1alpha1", diff --git a/vendor/github.com/tektoncd/triggers/test/eventlistener_test.go b/vendor/github.com/tektoncd/triggers/test/eventlistener_test.go index e14c4c33aed..ba5f6cbf047 100644 --- a/vendor/github.com/tektoncd/triggers/test/eventlistener_test.go +++ 
b/vendor/github.com/tektoncd/triggers/test/eventlistener_test.go @@ -85,7 +85,7 @@ func TestEventListenerCreate(t *testing.T) { Name: "pr1", Namespace: namespace, Labels: map[string]string{ - "$(params.oneparam)": "$(params.oneparam)", + "$(tt.params.oneparam)": "$(tt.params.oneparam)", }, }, Spec: v1alpha1.PipelineResourceSpec{ @@ -107,15 +107,15 @@ func TestEventListenerCreate(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "pr2", Labels: map[string]string{ - "$(params.twoparamname)": "$(params.twoparamvalue)", + "$(tt.params.twoparamname)": "$(tt.params.twoparamvalue)", }, }, Spec: v1alpha1.PipelineResourceSpec{ Type: "git", Params: []v1alpha1.ResourceParam{ - {Name: "license", Value: "$(params.license)"}, - {Name: "header", Value: "$(params.header)"}, - {Name: "prmessage", Value: "$(params.prmessage)"}, + {Name: "license", Value: "$(tt.params.license)"}, + {Name: "header", Value: "$(tt.params.header)"}, + {Name: "prmessage", Value: "$(tt.params.prmessage)"}, }, }, } @@ -230,6 +230,18 @@ func TestEventListenerCreate(t *testing.T) { ), bldr.EventListenerSpec( bldr.EventListenerServiceAccount(sa.Name), + bldr.EventListenerPodTemplate( + bldr.EventListenerPodTemplateSpec( + bldr.EventListenerPodTemplateTolerations([]corev1.Toleration{ + { + Key: "key", + Operator: "Equal", + Value: "value", + Effect: "NoSchedule", + }, + }), + ), + ), bldr.EventListenerTrigger(tt.Name, "", bldr.EventListenerTriggerBinding(tb.Name, "", tb.Name, "v1alpha1"), bldr.EventListenerTriggerBinding(ctb.Name, "ClusterTriggerBinding", ctb.Name, "v1alpha1"), @@ -326,7 +338,7 @@ func TestEventListenerCreate(t *testing.T) { hostIP := strings.TrimPrefix(config.Host, "https://") serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP} dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL) - out, errOut := new(bytes.Buffer), new(bytes.Buffer) + out, errOut := new(Buffer), new(Buffer) readyChan := make(chan struct{}, 1) forwarder, err := 
portforward.New(dialer, []string{portString}, stopChan, readyChan, out, errOut) if err != nil { diff --git a/vendor/github.com/tektoncd/triggers/third_party/LICENSE b/vendor/github.com/tektoncd/triggers/third_party/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/third_party/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/tektoncd/triggers/third_party/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/tektoncd/triggers/third_party/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 00000000000..298f0e2665e --- /dev/null +++ b/vendor/github.com/tektoncd/triggers/third_party/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/k8s.io/apiserver/Godeps/Godeps.json b/vendor/k8s.io/apiserver/Godeps/Godeps.json index eeee730e548..3c4c3653703 100644 --- a/vendor/k8s.io/apiserver/Godeps/Godeps.json +++ b/vendor/k8s.io/apiserver/Godeps/Godeps.json @@ -512,7 +512,7 @@ }, { "ImportPath": "go.etcd.io/bbolt", - "Rev": "v1.3.3" + "Rev": "v1.3.5" }, { "ImportPath": "go.etcd.io/etcd", @@ -556,7 +556,7 @@ }, { "ImportPath": "golang.org/x/mod", - "Rev": "v0.1.0" + "Rev": "c90efee705ee" }, { "ImportPath": "golang.org/x/net", @@ -572,11 +572,11 @@ }, { "ImportPath": "golang.org/x/sys", - "Rev": "fde4db37ae7a" + "Rev": "915c9c3d4ccf" }, { "ImportPath": "golang.org/x/text", - "Rev": "v0.3.2" + "Rev": "v0.3.3" }, { "ImportPath": "golang.org/x/time", @@ -584,7 +584,7 @@ }, { "ImportPath": "golang.org/x/tools", - "Rev": "65e3620a7ae7" + "Rev": "7b8e75db28f4" }, { "ImportPath": "golang.org/x/xerrors", @@ -668,19 +668,19 @@ }, { "ImportPath": "k8s.io/api", - "Rev": "8dc28b782f8e" + "Rev": "9a9335f05b18" }, { "ImportPath": "k8s.io/apimachinery", - "Rev": "c1bd2c2a276f" + "Rev": "eed6a5257d68" }, { "ImportPath": "k8s.io/client-go", - "Rev": "3c831c893e8a" + "Rev": "5fade4ec57c6" }, { "ImportPath": "k8s.io/component-base", - "Rev": "6c60a4d2ed11" + "Rev": "167327cfd813" }, { "ImportPath": "k8s.io/gengo", @@ -696,7 +696,7 @@ }, { "ImportPath": "k8s.io/utils", - "Rev": "2df71ebbae66" + "Rev": "6e3d28b6ed19" }, { "ImportPath": "rsc.io/binaryregexp", diff --git a/vendor/k8s.io/apiserver/SECURITY_CONTACTS b/vendor/k8s.io/apiserver/SECURITY_CONTACTS index 6df6a4d6a16..3888846b1a6 100644 --- a/vendor/k8s.io/apiserver/SECURITY_CONTACTS +++ b/vendor/k8s.io/apiserver/SECURITY_CONTACTS @@ -14,4 +14,5 @@ cjcullen joelsmith liggitt philips +sttts tallclair diff --git a/vendor/k8s.io/apiserver/go.mod b/vendor/k8s.io/apiserver/go.mod index 508661f77b8..58544c1023e 100644 --- a/vendor/k8s.io/apiserver/go.mod +++ b/vendor/k8s.io/apiserver/go.mod @@ -2,7 +2,7 @@ module k8s.io/apiserver -go 1.13 +go 1.14 require ( 
github.com/coreos/go-oidc v2.1.0+incompatible @@ -30,6 +30,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.4.0 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect + go.etcd.io/bbolt v1.3.5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f go.uber.org/zap v1.10.0 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 @@ -41,23 +42,22 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/square/go-jose.v2 v2.2.2 gopkg.in/yaml.v2 v2.2.8 - k8s.io/api v0.0.0-20200617050422-8dc28b782f8e - k8s.io/apimachinery v0.0.0-20200617050244-c1bd2c2a276f - k8s.io/client-go v0.0.0-20200617050643-3c831c893e8a - k8s.io/component-base v0.0.0-20200617051220-6c60a4d2ed11 + k8s.io/api v0.0.0-20200624085918-9a9335f05b18 + k8s.io/apimachinery v0.0.0-20200624084815-eed6a5257d68 + k8s.io/client-go v0.0.0-20200624090346-5fade4ec57c6 + k8s.io/component-base v0.0.0-20200625172041-167327cfd813 k8s.io/klog/v2 v2.1.0 k8s.io/kube-openapi v0.0.0-20200427153329-656914f816f9 - k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 + k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 sigs.k8s.io/structured-merge-diff/v3 v3.0.0 sigs.k8s.io/yaml v1.2.0 ) replace ( - golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13 - golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13 - k8s.io/api => k8s.io/api v0.0.0-20200617050422-8dc28b782f8e - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20200617050244-c1bd2c2a276f - k8s.io/client-go => k8s.io/client-go v0.0.0-20200617050643-3c831c893e8a - k8s.io/component-base => k8s.io/component-base v0.0.0-20200617051220-6c60a4d2ed11 + golang.org/x/sys => golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf // pinned to release-branch.go1.14-std + k8s.io/api => k8s.io/api v0.0.0-20200624085918-9a9335f05b18 + 
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20200624084815-eed6a5257d68 + k8s.io/client-go => k8s.io/client-go v0.0.0-20200624090346-5fade4ec57c6 + k8s.io/component-base => k8s.io/component-base v0.0.0-20200625172041-167327cfd813 ) diff --git a/vendor/k8s.io/apiserver/go.sum b/vendor/k8s.io/apiserver/go.sum index 59293b1e5f4..2ca813982c6 100644 --- a/vendor/k8s.io/apiserver/go.sum +++ b/vendor/k8s.io/apiserver/go.sum @@ -295,6 +295,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f h1:pBCD+Z7cy5WPTq+R6MmJJvDRpn88cp7bmTypBsn91g4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -312,6 +314,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 
h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -333,6 +336,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -364,19 +368,43 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf h1:+4j7oujXP478CVb/AFvHJmVX5+Pczx2NGts5yirA0oY= +golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -446,10 +474,10 @@ honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.0.0-20200617050422-8dc28b782f8e/go.mod h1:kD7EG+Wyq5Fu0MAEBkXyQVnnjyj/ChS5/vewKVRDaYI= -k8s.io/apimachinery v0.0.0-20200617050244-c1bd2c2a276f/go.mod h1:OO1oUmqmYEvK9/2GigG7tpElec1NAKGniAPppy1PJGA= -k8s.io/client-go v0.0.0-20200617050643-3c831c893e8a/go.mod h1:2QsvbX+hSBkSjVpimrdvrpStagCTqdSQxNnzGIxk1YY= -k8s.io/component-base v0.0.0-20200617051220-6c60a4d2ed11/go.mod h1:SIJ3hZh19C0QhquEIfpd0PWNnQPXJvrH9ClSZPF25sI= +k8s.io/api v0.0.0-20200624085918-9a9335f05b18/go.mod h1:0NI05lw6YVHu83nOP2Ayou4el+63LIYkL/anfX/Xgeg= +k8s.io/apimachinery v0.0.0-20200624084815-eed6a5257d68/go.mod h1:FzS272GL9jln/XO9XVCgkNL+ufOU3vbiON3UVVLEc3k= +k8s.io/client-go v0.0.0-20200624090346-5fade4ec57c6/go.mod h1:jpUGzcnVu/QnjuYN791+/yQHFM5FutZrtWbn149vE4A= +k8s.io/component-base v0.0.0-20200625172041-167327cfd813/go.mod h1:yoTLAXZ+ykvlUH2ZyM6lx10z9pIVpxKkHtQvNTDZ0hA= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -457,8 +485,8 @@ k8s.io/klog/v2 v2.1.0 h1:X3+Mru/L3jy4BI4vcAYkHvL6PyU+QBsuhEqwlI4mgkA= k8s.io/klog/v2 v2.1.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/kube-openapi v0.0.0-20200427153329-656914f816f9 h1:5NC2ITmvg8RoxoH0wgmL4zn4VZqXGsKbxrikjaQx6s4= k8s.io/kube-openapi v0.0.0-20200427153329-656914f816f9/go.mod h1:bfCVj+qXcEaE5SCvzBaqpOySr6tuCcpPKqF6HD8nyCw= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 h1:Ly1Oxdu5p5ZFmiVT71LFgeZETvMfZ1iBIGeOenT2JeM= -k8s.io/utils 
v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19 h1:7Nu2dTj82c6IaWvL7hImJzcXoTPz1MsSCH7r+0m6rfo= +k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go index d911d05972c..966ff1f0d15 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go +++ b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation.go @@ -31,6 +31,7 @@ const ( keyLenErrFmt = "secret is not of the expected length, got %d, expected one of %v" unsupportedSchemeErrFmt = "unsupported scheme %q for KMS provider, only unix is supported" atLeastOneRequiredErrFmt = "at least one %s is required" + invalidURLErrFmt = "invalid endpoint for kms provider, error: parse %s: net/url: invalid control character in URL" mandatoryFieldErrFmt = "%s is a mandatory field for a %s" base64EncodingErr = "secrets must be base64 encoded" zeroOrNegativeErrFmt = "%s should be a positive value" diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation_test.go b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation_test.go index 03192c83731..2c018016961 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation_test.go +++ b/vendor/k8s.io/apiserver/pkg/apis/config/validation/validation_test.go @@ -296,7 +296,7 @@ func TestKMSEndpoint(t *testing.T) { desc: "invalid url", in: &config.KMSConfiguration{Endpoint: "unix:///foo\n.socket"}, want: field.ErrorList{ - 
field.Invalid(endpointField, "unix:///foo\n.socket", "invalid endpoint for kms provider, error: parse unix:///foo\n.socket: net/url: invalid control character in URL"), + field.Invalid(endpointField, "unix:///foo\n.socket", fmt.Sprintf(invalidURLErrFmt, `"unix:///foo\n.socket"`)), }, }, } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go index b2f3720d486..a10564f04d9 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -34,7 +34,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" utilclock "k8s.io/apimachinery/pkg/util/clock" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/klog/v2" ) @@ -47,6 +50,16 @@ type cacheRecord struct { resp *authenticator.Response ok bool err error + + // this cache assumes token authn has no side-effects or temporal dependence. + // neither of these are true for audit annotations set via AddAuditAnnotation. + // + // for audit annotations, the assumption is that for some period of time (cache TTL), + // all requests with the same API audiences and the same bearer token result in the + // same annotations. This may not be true if the authenticator sets an annotation + // based on the current time, but that may be okay since cache TTLs are generally + // small (seconds). 
+ annotations map[string]string } type cachedTokenAuthenticator struct { @@ -109,6 +122,17 @@ func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, // AuthenticateToken implements authenticator.Token func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + record := a.doAuthenticateToken(ctx, token) + if !record.ok || record.err != nil { + return nil, false, record.err + } + for key, value := range record.annotations { + audit.AddAuditAnnotation(ctx, key, value) + } + return record.resp, true, nil +} + +func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, token string) *cacheRecord { doneAuthenticating := stats.authenticating() auds, audsOk := authenticator.AudiencesFrom(ctx) @@ -117,7 +141,7 @@ func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token if record, ok := a.cache.get(key); ok { // Record cache hit doneAuthenticating(true) - return record.resp, record.ok, record.err + return record } // Record cache miss @@ -125,18 +149,19 @@ func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token defer doneBlocking() defer doneAuthenticating(false) - type lookup struct { - resp *authenticator.Response - ok bool - } + c := a.group.DoChan(key, func() (val interface{}, _ error) { + // always use one place to read and write the output of AuthenticateToken + record := &cacheRecord{} - c := a.group.DoChan(key, func() (val interface{}, err error) { doneFetching := stats.fetching() // We're leaving the request handling stack so we need to handle crashes // ourselves. Log a stack trace and return a 500 if something panics. defer func() { if r := recover(); r != nil { - err = errAuthnCrash + // make sure to always return a record + record.err = errAuthnCrash + val = record + // Same as stdlib http server code. 
Manually allocate stack // trace buffer size to prevent excessively large logs const size = 64 << 10 @@ -144,12 +169,12 @@ func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token buf = buf[:runtime.Stack(buf, false)] klog.Errorf("%v\n%s", r, buf) } - doneFetching(err == nil) + doneFetching(record.err == nil) }() // Check again for a cached record. We may have raced with a fetch. if record, ok := a.cache.get(key); ok { - return lookup{record.resp, record.ok}, record.err + return record, nil } // Detach the context because the lookup may be shared by multiple callers, @@ -161,29 +186,35 @@ func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token ctx = authenticator.WithAudiences(ctx, auds) } - resp, ok, err := a.authenticator.AuthenticateToken(ctx, token) - if !a.cacheErrs && err != nil { - return nil, err + // since this is shared work between multiple requests, we have no way of knowing if any + // particular request supports audit annotations. thus we always attempt to record them. 
+ ev := &auditinternal.Event{Level: auditinternal.LevelMetadata} + ctx = request.WithAuditEvent(ctx, ev) + + record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token) + record.annotations = ev.Annotations + + if !a.cacheErrs && record.err != nil { + return record, nil } switch { - case ok && a.successTTL > 0: - a.cache.set(key, &cacheRecord{resp: resp, ok: ok, err: err}, a.successTTL) - case !ok && a.failureTTL > 0: - a.cache.set(key, &cacheRecord{resp: resp, ok: ok, err: err}, a.failureTTL) + case record.ok && a.successTTL > 0: + a.cache.set(key, record, a.successTTL) + case !record.ok && a.failureTTL > 0: + a.cache.set(key, record, a.failureTTL) } - return lookup{resp, ok}, err + + return record, nil }) select { case result := <-c: - if result.Err != nil { - return nil, false, result.Err - } - lookup := result.Val.(lookup) - return lookup.resp, lookup.ok, nil + // we always set Val and never set Err + return result.Val.(*cacheRecord) case <-ctx.Done(): - return nil, false, ctx.Err() + // fake a record on context cancel + return &cacheRecord{err: ctx.Err()} } } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator_test.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator_test.go index 7252a0a24d4..ed3abfc1d1f 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator_test.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator_test.go @@ -33,8 +33,11 @@ import ( "github.com/google/go-cmp/cmp" utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/uuid" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" ) func TestCachedTokenAuthenticator(t *testing.T) { @@ -274,6 +277,144 @@ func TestSharedLookup(t *testing.T) 
{ }) } +func TestCachedAuditAnnotations(t *testing.T) { + snorlax := &authenticator.Response{User: &user.DefaultInfo{Name: "snorlax"}} + + t.Run("annotations from cache", func(t *testing.T) { + var lookups uint32 + c := make(chan struct{}) + a := New(authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) { + <-c + atomic.AddUint32(&lookups, 1) + audit.AddAuditAnnotation(ctx, "snorlax", "rocks") + audit.AddAuditAnnotation(ctx, "pandas", "are amazing") + return snorlax, true, nil + }), false, time.Minute, 0) + + allAnnotations := make(chan map[string]string, 10) + defer close(allAnnotations) + + var wg sync.WaitGroup + for i := 0; i < cap(allAnnotations); i++ { + wg.Add(1) + go func() { + defer wg.Done() + + // exercise both ways of tracking audit annotations + r := mathrand.New(mathrand.NewSource(mathrand.Int63())) + randomChoice := r.Int()%2 == 0 + ctx := context.Background() + + if randomChoice { + ctx = audit.WithAuditAnnotations(ctx) + } else { + ctx = request.WithAuditEvent(ctx, &auditinternal.Event{Level: auditinternal.LevelMetadata}) + } + + _, _, _ = a.AuthenticateToken(ctx, "token") + + if randomChoice { + allAnnotations <- extractAnnotations(ctx) + } else { + allAnnotations <- request.AuditEventFrom(ctx).Annotations + } + }() + } + + // no good way to make sure that all the callers are queued so we sleep. 
+ time.Sleep(1 * time.Second) + close(c) + wg.Wait() + + want := map[string]string{"snorlax": "rocks", "pandas": "are amazing"} + for i := 0; i < cap(allAnnotations); i++ { + annotations := <-allAnnotations + if diff := cmp.Diff(want, annotations); diff != "" { + t.Errorf("%d: unexpected annotations (-want +got): %s", i, diff) + } + } + + if queued := len(allAnnotations); queued != 0 { + t.Errorf("expected all annoations to be processed: %d", queued) + } + + if lookups > 3 { + t.Errorf("unexpected number of lookups: got=%d, wanted less than 3", lookups) + } + }) + + t.Run("annotations do not change during cache TTL", func(t *testing.T) { + a := New(authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) { + audit.AddAuditAnnotation(ctx, "timestamp", time.Now().String()) + return snorlax, true, nil + }), false, time.Minute, 0) + + allAnnotations := make([]map[string]string, 0, 10) + + for i := 0; i < cap(allAnnotations); i++ { + ctx := audit.WithAuditAnnotations(context.Background()) + _, _, _ = a.AuthenticateToken(ctx, "token") + allAnnotations = append(allAnnotations, extractAnnotations(ctx)) + } + + if len(allAnnotations) != cap(allAnnotations) { + t.Errorf("failed to process all annotations") + } + + want := allAnnotations[0] + if ok := len(want) == 1 && len(want["timestamp"]) > 0; !ok { + t.Errorf("invalid annotations: %v", want) + } + + for i, annotations := range allAnnotations[1:] { + if diff := cmp.Diff(want, annotations); diff != "" { + t.Errorf("%d: unexpected annotations (-want +got): %s", i, diff) + } + } + }) + + t.Run("different tokens can have different annotations", func(t *testing.T) { + a := New(authenticator.TokenFunc(func(ctx context.Context, token string) (*authenticator.Response, bool, error) { + audit.AddAuditAnnotation(ctx, "timestamp", time.Now().String()) + return snorlax, true, nil + }), false, time.Minute, 0) + + ctx1 := audit.WithAuditAnnotations(context.Background()) + _, _, _ = 
a.AuthenticateToken(ctx1, "token1") + annotations1 := extractAnnotations(ctx1) + + // guarantee different now times + time.Sleep(time.Second) + + ctx2 := audit.WithAuditAnnotations(context.Background()) + _, _, _ = a.AuthenticateToken(ctx2, "token2") + annotations2 := extractAnnotations(ctx2) + + if ok := len(annotations1) == 1 && len(annotations1["timestamp"]) > 0; !ok { + t.Errorf("invalid annotations 1: %v", annotations1) + } + if ok := len(annotations2) == 1 && len(annotations2["timestamp"]) > 0; !ok { + t.Errorf("invalid annotations 2: %v", annotations2) + } + + if annotations1["timestamp"] == annotations2["timestamp"] { + t.Errorf("annotations should have different timestamp value: %v", annotations1) + } + }) +} + +func extractAnnotations(ctx context.Context) map[string]string { + annotationsSlice := reflect.ValueOf(ctx).Elem().FieldByName("val").Elem().Elem() + annotations := map[string]string{} + for i := 0; i < annotationsSlice.Len(); i++ { + annotation := annotationsSlice.Index(i) + key := annotation.FieldByName("key").String() + val := annotation.FieldByName("value").String() + annotations[key] = val + } + return annotations +} + func BenchmarkCachedTokenAuthenticator(b *testing.B) { tokenCount := []int{100, 500, 2500, 12500, 62500} threadCount := []int{1, 16, 256} @@ -318,6 +459,8 @@ func (s *singleBenchmark) makeTokens() { s.tokenToAuds = map[string]authenticator.Audiences{} s.tokens = []string{} + rr := mathrand.New(mathrand.NewSource(mathrand.Int63())) + for i := 0; i < s.tokenCount; i++ { tok := fmt.Sprintf("%v-%v", jwtToken, i) r := cacheRecord{ @@ -327,14 +470,23 @@ func (s *singleBenchmark) makeTokens() { } // make different combinations of audience, failures, denies for the tokens. 
auds := []string{} - for i := 0; i < mathrand.Intn(4); i++ { + for i := 0; i < rr.Intn(4); i++ { auds = append(auds, string(uuid.NewUUID())) } - choice := mathrand.Float64() + choice := rr.Float64() switch { case choice < 0.9: r.ok = true r.err = nil + + // add some realistic annotations on ~20% of successful authentications + if f := rr.Float64(); f < 0.2 { + r.annotations = map[string]string{ + "audience.authentication.kubernetes.io": "e8357258-88b1-11ea-bc55-0242ac130003", + "namespace.authentication.kubernetes.io": "kube-system", + "float.authentication.kubernetes.io": fmt.Sprint(f), + } + } case choice < 0.99: r.ok = false r.err = nil @@ -355,6 +507,9 @@ func (s *singleBenchmark) lookup(ctx context.Context, token string) (*authentica if !ok { panic("test setup problem") } + for key, val := range r.annotations { + audit.AddAuditAnnotation(ctx, key, val) + } return r.resp, r.ok, r.err } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index 2f8c8320357..dbae77190bd 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -248,7 +248,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc if timeout == 0 && minRequestTimeout > 0 { timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) } - klog.V(3).Infof("Starting watch for %s, rv=%s labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) + klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout) ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() watcher, err := rw.Watch(ctx, &opts) diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 
9869e60c265..3fe1ebec0f7 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -138,8 +138,9 @@ const ( // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" - // owner: @shaloulcy + // owner: @shaloulcy, @wojtek-t // alpha: v1.18 + // beta: v1.19 // // Allows label and field based indexes in apiserver watch cache to accelerate list operations. SelectorIndex featuregate.Feature = "SelectorIndex" @@ -173,6 +174,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, APIPriorityAndFairness: {Default: false, PreRelease: featuregate.Alpha}, RemoveSelfLink: {Default: false, PreRelease: featuregate.Alpha}, - SelectorIndex: {Default: false, PreRelease: featuregate.Alpha}, + SelectorIndex: {Default: true, PreRelease: featuregate.Beta}, WarningHeaders: {Default: true, PreRelease: featuregate.Beta}, } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index f50d1a0b32a..64a5ab3f3d8 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -473,6 +473,16 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj // deleteObj is only used in case a deletion is carried out var deleteObj runtime.Object err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + existingResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(existing) + if err != nil { + return nil, nil, err + } + if existingResourceVersion == 0 { + if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate { + return nil, nil, 
apierrors.NewNotFound(qualifiedResource, name) + } + } + // Given the existing object, get the new object obj, err := objInfo.UpdatedObject(ctx, existing) if err != nil { @@ -483,20 +493,13 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj // the user does not have a resource version, then we populate it with // the latest version. Else, we check that the version specified by // the user matches the version of latest storage object. - resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj) + newResourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj) if err != nil { return nil, nil, err } - doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate() + doUnconditionalUpdate := newResourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate() - version, err := e.Storage.Versioner().ObjectResourceVersion(existing) - if err != nil { - return nil, nil, err - } - if version == 0 { - if !e.UpdateStrategy.AllowCreateOnUpdate() && !forceAllowCreate { - return nil, nil, apierrors.NewNotFound(qualifiedResource, name) - } + if existingResourceVersion == 0 { creating = true creatingObj = obj if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { @@ -529,15 +532,15 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj } else { // Check if the object's resource version matches the latest // resource version. - if resourceVersion == 0 { + if newResourceVersion == 0 { // TODO: The Invalid error should have a field for Resource. // After that field is added, we should fill the Resource and // leave the Kind field empty. See the discussion in #18526. 
qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource} - fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), resourceVersion, "must be specified for an update")} + fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newResourceVersion, "must be specified for an update")} return nil, nil, apierrors.NewInvalid(qualifiedKind, name, fieldErrList) } - if resourceVersion != version { + if newResourceVersion != existingResourceVersion { return nil, nil, apierrors.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) } } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go index 00e6017f0ec..fd1979bb625 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go @@ -1612,7 +1612,6 @@ func newTestGenericStoreRegistry(t *testing.T, scheme *runtime.Scheme, hasCacheE } if hasCacheEnabled { config := cacherstorage.Config{ - CacheCapacity: 10, Storage: s, Versioner: etcd3.APIObjectVersioner{}, ResourcePrefix: podPrefix, diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index ed680f8d5e5..c744cef1bde 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -53,6 +53,7 @@ import ( genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi" apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" genericregistry "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/server/dynamiccertificates" "k8s.io/apiserver/pkg/server/egressselector" @@ -60,6 +61,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/routes" 
serverstore "k8s.io/apiserver/pkg/server/storage" + "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" "k8s.io/client-go/informers" restclient "k8s.io/client-go/rest" @@ -709,6 +711,9 @@ func installAPI(s *GenericAPIServer, c *Config) { if c.EnableDiscovery { s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService()) } + if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + c.FlowControl.Install(s.Handler.NonGoRestfulMux) + } } func NewRequestInfoResolver(c *Config) *apirequest.RequestInfoFactory { diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index 6cffab5b3ff..4cb5306672b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -85,7 +85,9 @@ func WithLogging(handler http.Handler, pred StacktracePred) http.Handler { rl := newLogged(req, w).StacktraceWhen(pred) req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl)) - defer rl.Log() + if klog.V(3).Enabled() { + defer func() { klog.InfoS("HTTP", rl.LogArgs()...) }() + } handler.ServeHTTP(rl, req) }) } @@ -153,24 +155,34 @@ func (rl *respLogger) Addf(format string, data ...interface{}) { rl.addedInfo += "\n" + fmt.Sprintf(format, data...) 
} -// Log is intended to be called once at the end of your request handler, via defer -func (rl *respLogger) Log() { +func (rl *respLogger) LogArgs() []interface{} { latency := time.Since(rl.startTime) - if klog.V(3).Enabled() { - if !rl.hijacked { - klog.InfoDepth(1, fmt.Sprintf("verb=%q URI=%q latency=%v resp=%v UserAgent=%q srcIP=%q: %v%v", - rl.req.Method, rl.req.RequestURI, - latency, rl.status, - rl.req.UserAgent(), rl.req.RemoteAddr, - rl.statusStack, rl.addedInfo, - )) - } else { - klog.InfoDepth(1, fmt.Sprintf("verb=%q URI=%q latency=%v UserAgent=%q srcIP=%q: hijacked", - rl.req.Method, rl.req.RequestURI, - latency, rl.req.UserAgent(), rl.req.RemoteAddr, - )) + if rl.hijacked { + return []interface{}{ + "verb", rl.req.Method, + "URI", rl.req.RequestURI, + "latency", latency, + "userAgent", rl.req.UserAgent(), + "srcIP", rl.req.RemoteAddr, + "hijacked", true, } } + args := []interface{}{ + "verb", rl.req.Method, + "URI", rl.req.RequestURI, + "latency", latency, + "userAgent", rl.req.UserAgent(), + "srcIP", rl.req.RemoteAddr, + "resp", rl.status, + } + if len(rl.statusStack) > 0 { + args = append(args, "statusStack", rl.statusStack) + } + + if len(rl.addedInfo) > 0 { + args = append(args, "addedInfo", rl.addedInfo) + } + return args } // Header implements http.ResponseWriter. 
diff --git a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go index b5b108ce732..16cd04c6568 100644 --- a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -218,7 +218,7 @@ func RunServer( defer utilruntime.HandleCrash() var listener net.Listener - listener = tcpKeepAliveListener{ln.(*net.TCPListener)} + listener = tcpKeepAliveListener{ln} if server.TLSConfig != nil { listener = tls.NewListener(listener, server.TLSConfig) } @@ -244,15 +244,17 @@ func RunServer( // // Copied from Go 1.7.2 net/http/server.go type tcpKeepAliveListener struct { - *net.TCPListener + net.Listener } func (ln tcpKeepAliveListener) Accept() (net.Conn, error) { - tc, err := ln.AcceptTCP() + c, err := ln.Listener.Accept() if err != nil { return nil, err } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(defaultKeepAlivePeriod) - return tc, nil + if tc, ok := c.(*net.TCPConn); ok { + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(defaultKeepAlivePeriod) + } + return c, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 6cccf80c088..37a06beab69 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -60,11 +60,6 @@ const ( // Config contains the configuration for a given Cache. type Config struct { - // Maximum size of the history cached in memory. - // - // DEPRECATED: Cache capacity is dynamic and this field is no longer used. - CacheCapacity int - // An underlying storage.Interface. 
Storage storage.Interface @@ -404,6 +399,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) { c.watchCache.SetOnReplace(func() { successfulList = true c.ready.set(true) + klog.V(1).Infof("cacher (%v): initialized", c.objectType.String()) }) defer func() { if successfulList { @@ -417,7 +413,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) { // Also note that startCaching is called in a loop, so there's no need // to have another loop here. if err := c.reflector.ListAndWatch(stopChannel); err != nil { - klog.Errorf("unexpected ListAndWatch error: %v", err) + klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err) } } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go index 41199668085..c13c1140672 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go @@ -259,10 +259,9 @@ func init() { utilruntime.Must(examplev1.AddToScheme(scheme)) } -func newTestCacher(s storage.Interface, cap int) (*Cacher, storage.Versioner, error) { +func newTestCacher(s storage.Interface) (*Cacher, storage.Versioner, error) { prefix := "pods" config := Config{ - CacheCapacity: cap, Storage: s, Versioner: testVersioner{}, ResourcePrefix: prefix, @@ -332,7 +331,7 @@ func (d *dummyStorage) Count(_ string) (int64, error) { func TestListCacheBypass(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 0) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -367,7 +366,7 @@ func TestListCacheBypass(t *testing.T) { func TestGetToListCacheBypass(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 0) + cacher, _, err := newTestCacher(backingStorage) if err != nil { 
t.Fatalf("Couldn't create cacher: %v", err) } @@ -402,7 +401,7 @@ func TestGetToListCacheBypass(t *testing.T) { func TestGetCacheBypass(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -434,7 +433,7 @@ func TestGetCacheBypass(t *testing.T) { func TestWatcherNotGoingBackInTime(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -558,7 +557,7 @@ func TestCacheWatcherStoppedInAnotherGoroutine(t *testing.T) { func TestCacheWatcherStoppedOnDestroy(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -640,7 +639,7 @@ func TestTimeBucketWatchersBasic(t *testing.T) { func TestCacherNoLeakWithMultipleWatchers(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchBookmark, true)() backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -708,7 +707,7 @@ func TestCacherNoLeakWithMultipleWatchers(t *testing.T) { func testCacherSendBookmarkEvents(t *testing.T, allowWatchBookmarks, expectedBookmarks bool) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -796,7 +795,7 @@ func TestCacherSendBookmarkEvents(t *testing.T) { func TestCacherSendsMultipleWatchBookmarks(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, 
err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -868,7 +867,7 @@ func TestCacherSendsMultipleWatchBookmarks(t *testing.T) { func TestDispatchingBookmarkEventsWithConcurrentStop(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchBookmark, true)() backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -941,7 +940,7 @@ func TestDispatchingBookmarkEventsWithConcurrentStop(t *testing.T) { func TestDispatchEventWillNotBeBlockedByTimedOutWatcher(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -1040,7 +1039,7 @@ func verifyEvents(t *testing.T, w watch.Interface, events []watch.Event) { func TestCachingDeleteEvents(t *testing.T) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 1000) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -1120,7 +1119,7 @@ func TestCachingDeleteEvents(t *testing.T) { func testCachingObjects(t *testing.T, watchersCount int) { backingStorage := &dummyStorage{} - cacher, _, err := newTestCacher(backingStorage, 10) + cacher, _, err := newTestCacher(backingStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index 7ba7d475b3e..1e91733e143 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -53,7 +53,6 @@ const ( eventFreshDuration = 
75 * time.Second // defaultLowerBoundCapacity is a default value for event cache capacity's lower bound. - // 100 is minimum in NewHeuristicWatchCacheSizes. // TODO: Figure out, to what value we can decreased it. defaultLowerBoundCapacity = 100 diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index 1071c0f7f8f..793495f3e08 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -261,8 +261,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) { continue } if len(wc.resultChan) == outgoingBufSize { - klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ - "Probably caused by slow dispatching events to watchers", outgoingBufSize) + klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize) } // If user couldn't receive results fast enough, we also block incoming events from watcher. // Because storing events in local will cause more memory usage. @@ -368,9 +367,7 @@ func (wc *watchChan) sendError(err error) { func (wc *watchChan) sendEvent(e *event) { if len(wc.incomingEventChan) == incomingBufSize { - klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ - "Probably caused by slow decoding, user not receiving fast, or other processing logic", - incomingBufSize) + klog.V(3).InfoS("Fast watcher, slow processing. 
Probably caused by slow decoding, user not receiving fast, or other processing logic", "incomingEvents", incomingBufSize) } select { case wc.incomingEventChan <- e: diff --git a/vendor/k8s.io/apiserver/pkg/storage/tests/cacher_test.go b/vendor/k8s.io/apiserver/pkg/storage/tests/cacher_test.go index 18a7f674157..b9de03e15cc 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/tests/cacher_test.go +++ b/vendor/k8s.io/apiserver/pkg/storage/tests/cacher_test.go @@ -105,15 +105,14 @@ func newEtcdTestStorage(t *testing.T, prefix string) (*etcd3testing.EtcdTestServ return server, storage } -func newTestCacher(s storage.Interface, cap int) (*cacherstorage.Cacher, storage.Versioner, error) { - return newTestCacherWithClock(s, cap, clock.RealClock{}) +func newTestCacher(s storage.Interface) (*cacherstorage.Cacher, storage.Versioner, error) { + return newTestCacherWithClock(s, clock.RealClock{}) } -func newTestCacherWithClock(s storage.Interface, cap int, clock clock.Clock) (*cacherstorage.Cacher, storage.Versioner, error) { +func newTestCacherWithClock(s storage.Interface, clock clock.Clock) (*cacherstorage.Cacher, storage.Versioner, error) { prefix := "pods" v := etcd3.APIObjectVersioner{} config := cacherstorage.Config{ - CacheCapacity: cap, Storage: s, Versioner: v, ResourcePrefix: prefix, @@ -160,7 +159,7 @@ func updatePod(t *testing.T, s storage.Interface, obj, old *example.Pod) *exampl func TestGet(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, _, err := newTestCacher(etcdStorage, 10) + cacher, _, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -194,7 +193,7 @@ func TestGet(t *testing.T) { func TestGetToList(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, _, err := newTestCacher(etcdStorage, 10) + cacher, _, err := newTestCacher(etcdStorage) if err != nil { 
t.Fatalf("Couldn't create cacher: %v", err) } @@ -253,7 +252,7 @@ func TestGetToList(t *testing.T) { func TestList(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, _, err := newTestCacher(etcdStorage, 10) + cacher, _, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -339,7 +338,7 @@ func TestList(t *testing.T) { func TestTooLargeResourceVersionList(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, v, err := newTestCacher(etcdStorage, 10) + cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -406,7 +405,7 @@ func TestWatch(t *testing.T) { etcdStorage = &injectListError{errors: 1, Interface: etcdStorage} defer server.Terminate(t) fakeClock := clock.NewFakeClock(time.Now()) - cacher, _, err := newTestCacherWithClock(etcdStorage, watchCacheDefaultCapacity, fakeClock) + cacher, _, err := newTestCacherWithClock(etcdStorage, fakeClock) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -493,7 +492,7 @@ func TestWatch(t *testing.T) { func TestWatcherTimeout(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, _, err := newTestCacher(etcdStorage, 10) + cacher, _, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -538,7 +537,7 @@ func TestWatcherTimeout(t *testing.T) { func TestFiltering(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, _, err := newTestCacher(etcdStorage, 10) + cacher, _, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -603,7 +602,7 @@ func TestFiltering(t *testing.T) { func TestStartingResourceVersion(t *testing.T) { server, etcdStorage := 
newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, v, err := newTestCacher(etcdStorage, 10) + cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -668,7 +667,7 @@ func TestEmptyWatchEventCache(t *testing.T) { fooCreated := updatePod(t, etcdStorage, makeTestPod("foo"), nil) - cacher, v, err := newTestCacher(etcdStorage, 10) + cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -725,7 +724,7 @@ func TestEmptyWatchEventCache(t *testing.T) { func TestRandomWatchDeliver(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, v, err := newTestCacher(etcdStorage, 10) + cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -858,7 +857,7 @@ func TestWatchDispatchBookmarkEvents(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, v, err := newTestCacher(etcdStorage, 10) + cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } @@ -923,7 +922,7 @@ func TestWatchBookmarksWithCorrectResourceVersion(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcd3testing.PathPrefix()) defer server.Terminate(t) - cacher, v, err := newTestCacher(etcdStorage, 10) + cacher, v, err := newTestCacher(etcdStorage) if err != nil { t.Fatalf("Couldn't create cacher: %v", err) } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index e1c446576ea..577ae2eb1f6 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -641,14 +641,15 @@ func (cfgCtl *configController) startRequest(ctx context.Context, rd RequestDige numQueues 
= plState.pl.Spec.Limited.LimitResponse.Queuing.Queues } + var flowDistinguisher string var hashValue uint64 if numQueues > 1 { - flowDistinguisher := computeFlowDistinguisher(rd, fs.Spec.DistinguisherMethod) + flowDistinguisher = computeFlowDistinguisher(rd, fs.Spec.DistinguisherMethod) hashValue = hashFlowID(fs.Name, flowDistinguisher) } startWaitingTime = time.Now() klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, numQueues=%d", rd, fs.Name, fs.Spec.DistinguisherMethod, plName, numQueues) - req, idle := plState.queues.StartRequest(ctx, hashValue, fs.Name, rd.RequestInfo, rd.User) + req, idle := plState.queues.StartRequest(ctx, hashValue, flowDistinguisher, fs.Name, rd.RequestInfo, rd.User) if idle { cfgCtl.maybeReapLocked(plName, plState) } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go new file mode 100644 index 00000000000..4a467b6d9f2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller_debug.go @@ -0,0 +1,277 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flowcontrol + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "text/tabwriter" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/server/mux" +) + +const ( + queryIncludeRequestDetails = "includeRequestDetails" +) + +func (cfgCtl *configController) Install(c *mux.PathRecorderMux) { + // TODO(yue9944882): handle "Accept" header properly + // debugging dumps a CSV content for three levels of granularity + // 1. row per priority-level + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_priority_levels", cfgCtl.dumpPriorityLevels) + // 2. row per queue + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_queues", cfgCtl.dumpQueues) + // 3. row per request + c.UnlistedHandleFunc("/debug/api_priority_and_fairness/dump_requests", cfgCtl.dumpRequests) +} + +func (cfgCtl *configController) dumpPriorityLevels(w http.ResponseWriter, r *http.Request) { + cfgCtl.lock.Lock() + defer cfgCtl.lock.Unlock() + tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0) + columnHeaders := []string{ + "PriorityLevelName", // 1 + "ActiveQueues", // 2 + "IsIdle", // 3 + "IsQuiescing", // 4 + "WaitingRequests", // 5 + "ExecutingRequests", // 6 + } + tabPrint(tabWriter, rowForHeaders(columnHeaders)) + endline(tabWriter) + for _, plState := range cfgCtl.priorityLevelStates { + if plState.queues == nil { + tabPrint(tabWriter, row( + plState.pl.Name, // 1 + "", // 2 + "", // 3 + "", // 4 + "", // 5 + "", // 6 + )) + endline(tabWriter) + continue + } + queueSetDigest := plState.queues.Dump(false) + activeQueueNum := 0 + for _, q := range queueSetDigest.Queues { + if len(q.Requests) > 0 { + activeQueueNum++ + } + } + + tabPrint(tabWriter, rowForPriorityLevel( + plState.pl.Name, // 1 + activeQueueNum, // 2 + plState.queues.IsIdle(), // 3 + plState.quiescing, // 4 + queueSetDigest.Waiting, // 5 + queueSetDigest.Executing, // 6 + )) + endline(tabWriter) + } + 
runtime.HandleError(tabWriter.Flush()) +} + +func (cfgCtl *configController) dumpQueues(w http.ResponseWriter, r *http.Request) { + cfgCtl.lock.Lock() + defer cfgCtl.lock.Unlock() + tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0) + columnHeaders := []string{ + "PriorityLevelName", // 1 + "Index", // 2 + "PendingRequests", // 3 + "ExecutingRequests", // 4 + "VirtualStart", // 5 + } + tabPrint(tabWriter, rowForHeaders(columnHeaders)) + endline(tabWriter) + for _, plState := range cfgCtl.priorityLevelStates { + if plState.queues == nil { + tabPrint(tabWriter, row( + plState.pl.Name, // 1 + "", // 2 + "", // 3 + "", // 4 + "", // 5 + )) + endline(tabWriter) + continue + } + queueSetDigest := plState.queues.Dump(false) + for i, q := range queueSetDigest.Queues { + tabPrint(tabWriter, rowForQueue( + plState.pl.Name, // 1 + i, // 2 + len(q.Requests), // 3 + q.ExecutingRequests, // 4 + q.VirtualStart, // 5 + )) + endline(tabWriter) + } + } + runtime.HandleError(tabWriter.Flush()) +} + +func (cfgCtl *configController) dumpRequests(w http.ResponseWriter, r *http.Request) { + cfgCtl.lock.Lock() + defer cfgCtl.lock.Unlock() + + includeRequestDetails := len(r.URL.Query().Get(queryIncludeRequestDetails)) > 0 + + tabWriter := tabwriter.NewWriter(w, 8, 0, 1, ' ', 0) + tabPrint(tabWriter, rowForHeaders([]string{ + "PriorityLevelName", // 1 + "FlowSchemaName", // 2 + "QueueIndex", // 3 + "RequestIndexInQueue", // 4 + "FlowDistingsher", // 5 + "ArriveTime", // 6 + })) + if includeRequestDetails { + tabPrint(tabWriter, rowForHeaders([]string{ + "UserName", // 7 + "Verb", // 8 + "APIPath", // 9 + "Namespace", // 10 + "Name", // 11 + "APIVersion", // 12 + "Resource", // 13 + "SubResource", // 14 + })) + } + endline(tabWriter) + for _, plState := range cfgCtl.priorityLevelStates { + if plState.queues == nil { + tabPrint(tabWriter, row( + plState.pl.Name, // 1 + "", // 2 + "", // 3 + "", // 4 + "", // 5 + "", // 6 + )) + if includeRequestDetails { + tabPrint(tabWriter, row( + "", // 
7 + "", // 8 + "", // 9 + "", // 10 + "", // 11 + "", // 12 + "", // 13 + "", // 14 + )) + } + endline(tabWriter) + continue + } + queueSetDigest := plState.queues.Dump(includeRequestDetails) + for iq, q := range queueSetDigest.Queues { + for ir, r := range q.Requests { + tabPrint(tabWriter, rowForRequest( + plState.pl.Name, // 1 + r.MatchedFlowSchema, // 2 + iq, // 3 + ir, // 4 + r.FlowDistinguisher, // 5 + r.ArriveTime, // 6 + )) + if includeRequestDetails { + tabPrint(tabWriter, rowForRequestDetails( + r.UserName, // 7 + r.RequestInfo.Verb, // 8 + r.RequestInfo.Path, // 9 + r.RequestInfo.Namespace, // 10 + r.RequestInfo.Name, // 11 + schema.GroupVersion{ + Group: r.RequestInfo.APIGroup, + Version: r.RequestInfo.APIVersion, + }.String(), // 12 + r.RequestInfo.Resource, // 13 + r.RequestInfo.Subresource, // 14 + )) + } + endline(tabWriter) + } + } + } + runtime.HandleError(tabWriter.Flush()) +} + +func tabPrint(w io.Writer, row string) { + _, err := fmt.Fprint(w, row) + runtime.HandleError(err) +} +func endline(w io.Writer) { + _, err := fmt.Fprint(w, "\n") + runtime.HandleError(err) +} + +func rowForHeaders(headers []string) string { + return row(headers...) 
+} + +func rowForPriorityLevel(plName string, activeQueues int, isIdle, isQuiescing bool, waitingRequests, executingRequests int) string { + return row( + plName, + strconv.Itoa(activeQueues), + strconv.FormatBool(isIdle), + strconv.FormatBool(isQuiescing), + strconv.Itoa(waitingRequests), + strconv.Itoa(executingRequests), + ) +} + +func rowForQueue(plName string, index, waitingRequests, executingRequests int, virtualStart float64) string { + return row( + plName, + strconv.Itoa(index), + strconv.Itoa(waitingRequests), + strconv.Itoa(executingRequests), + fmt.Sprintf("%.4f", virtualStart), + ) +} + +func rowForRequest(plName, fsName string, queueIndex, requestIndex int, flowDistinguisher string, arriveTime time.Time) string { + return row( + plName, + fsName, + strconv.Itoa(queueIndex), + strconv.Itoa(requestIndex), + flowDistinguisher, + arriveTime.UTC().Format(time.RFC3339Nano), + ) +} + +func rowForRequestDetails(username, verb, path, namespace, name, apiVersion, resource, subResource string) string { + return row( + username, + verb, + path, + ) +} + +func row(columns ...string) string { + return strings.Join(columns, ",\t") + ",\t" +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 79bc9faad1a..0ee68f2340f 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/server/mux" "k8s.io/apiserver/pkg/util/flowcontrol/counter" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" fqs "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset" @@ -51,6 +52,9 @@ type Interface interface { // any needed changes to local behavior. This method ceases // activity and returns after the given channel is closed. Run(stopCh <-chan struct{}) error + + // Install installs debugging endpoints to the web-server. 
+ Install(c *mux.PathRecorderMux) } // This request filter implements https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go index 84a5d72c5a8..faa8c74080f 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/controller_test.go @@ -28,6 +28,7 @@ import ( fcv1a1 "k8s.io/api/flowcontrol/v1alpha1" "k8s.io/apimachinery/pkg/util/sets" fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" fcfmt "k8s.io/apiserver/pkg/util/flowcontrol/format" "k8s.io/client-go/informers" @@ -95,6 +96,10 @@ func (cqs *ctlTestQueueSet) BeginConfigChange(qc fq.QueuingConfig) (fq.QueueSetC return ctlTestQueueSetCompleter{cqs.cts, cqs, qc}, nil } +func (cqs *ctlTestQueueSet) Dump(bool) debug.QueueSetDump { + return debug.QueueSetDump{} +} + func (cqc ctlTestQueueSetCompleter) Complete(dc fq.DispatchingConfig) fq.QueueSet { cqc.cts.lock.Lock() defer cqc.cts.lock.Unlock() @@ -115,7 +120,7 @@ func (cqs *ctlTestQueueSet) IsIdle() bool { return cqs.countActive == 0 } -func (cqs *ctlTestQueueSet) StartRequest(ctx context.Context, hashValue uint64, fsName string, descr1, descr2 interface{}) (req fq.Request, idle bool) { +func (cqs *ctlTestQueueSet) StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}) (req fq.Request, idle bool) { cqs.cts.lock.Lock() defer cqs.cts.lock.Unlock() cqs.countActive++ diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go new file mode 100644 index 00000000000..d668d9fe7b2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go @@ -0,0 +1,47 
@@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "k8s.io/apiserver/pkg/endpoints/request" + "time" +) + +// QueueSetDump is an instant dump of queue-set. +type QueueSetDump struct { + Queues []QueueDump + Waiting int + Executing int +} + +// QueueDump is an instant dump of one queue in a queue-set. +type QueueDump struct { + Requests []RequestDump + VirtualStart float64 + ExecutingRequests int +} + +// RequestDump is an instant dump of one requests pending in the queue. +type RequestDump struct { + MatchedFlowSchema string + FlowDistinguisher string + ArriveTime time.Time + StartTime time.Time + // request details + UserName string + RequestInfo request.RequestInfo +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go index 5e573bf8f21..3ac03f78758 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -19,6 +19,8 @@ package fairqueuing import ( "context" "time" + + "k8s.io/apiserver/pkg/util/flowcontrol/debug" ) // QueueSetFactory is used to create QueueSet objects. Creation, like @@ -77,7 +79,13 @@ type QueueSet interface { // was idle at the moment of the return. Otherwise idle==false // and the client must call the Finish method of the Request // exactly once. 
- StartRequest(ctx context.Context, hashValue uint64, fsName string, descr1, descr2 interface{}) (req Request, idle bool) + StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}) (req Request, idle bool) + + // Dump saves and returns the instant internal state of the queue-set. + // Note that dumping process will stop the queue-set from proceeding + // any requests. + // For debugging only. + Dump(includeRequestDetails bool) debug.QueueSetDump } // Request represents the remainder of the handling of one request diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index 57ce829bcc5..316ca34794c 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/util/flowcontrol/counter" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/lockingpromise" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" @@ -221,7 +222,7 @@ const ( // executing at each point where there is a change in that quantity, // because the metrics --- and only the metrics --- track that // quantity per FlowSchema. 
-func (qs *queueSet) StartRequest(ctx context.Context, hashValue uint64, fsName string, descr1, descr2 interface{}) (fq.Request, bool) { +func (qs *queueSet) StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}) (fq.Request, bool) { qs.lockAndSyncTime() defer qs.lock.Unlock() var req *request @@ -235,7 +236,7 @@ func (qs *queueSet) StartRequest(ctx context.Context, hashValue uint64, fsName s metrics.AddReject(qs.qCfg.Name, fsName, "concurrency-limit") return nil, qs.isIdleLocked() } - req = qs.dispatchSansQueueLocked(ctx, fsName, descr1, descr2) + req = qs.dispatchSansQueueLocked(ctx, flowDistinguisher, fsName, descr1, descr2) return req, false } @@ -246,7 +247,7 @@ func (qs *queueSet) StartRequest(ctx context.Context, hashValue uint64, fsName s // 3) Reject current request if there is not enough concurrency shares and // we are at max queue length // 4) If not rejected, create a request and enqueue - req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, hashValue, fsName, descr1, descr2) + req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, hashValue, flowDistinguisher, fsName, descr1, descr2) // req == nil means that the request was rejected - no remaining // concurrency shares and at max queue length already if req == nil { @@ -398,7 +399,7 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { // returns the enqueud request on a successful enqueue // returns nil in the case that there is no available concurrency or // the queuelengthlimit has been reached -func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, hashValue uint64, fsName string, descr1, descr2 interface{}) *request { +func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}) *request { // Start with the shuffle sharding, to pick a queue. 
queueIdx := qs.chooseQueueIndexLocked(hashValue, descr1, descr2) queue := qs.queues[queueIdx] @@ -410,14 +411,15 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte // Create a request and enqueue req := &request{ - qs: qs, - fsName: fsName, - ctx: ctx, - decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), - arrivalTime: qs.clock.Now(), - queue: queue, - descr1: descr1, - descr2: descr2, + qs: qs, + fsName: fsName, + flowDistinguisher: flowDistinguisher, + ctx: ctx, + decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + arrivalTime: qs.clock.Now(), + queue: queue, + descr1: descr1, + descr2: descr2, } if ok := qs.rejectOrEnqueueLocked(req); !ok { return nil @@ -523,17 +525,18 @@ func (qs *queueSet) dispatchAsMuchAsPossibleLocked() { } } -func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, fsName string, descr1, descr2 interface{}) *request { +func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, flowDistinguisher, fsName string, descr1, descr2 interface{}) *request { now := qs.clock.Now() req := &request{ - qs: qs, - fsName: fsName, - ctx: ctx, - startTime: now, - decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), - arrivalTime: now, - descr1: descr1, - descr2: descr2, + qs: qs, + fsName: fsName, + flowDistinguisher: flowDistinguisher, + ctx: ctx, + startTime: now, + decision: lockingpromise.NewWriteOnce(&qs.lock, qs.counter), + arrivalTime: now, + descr1: descr1, + descr2: descr2, } req.decision.SetLocked(decisionExecute) qs.totRequestsExecuting++ @@ -709,3 +712,17 @@ func (qs *queueSet) preCreateOrUnblockGoroutine() { func (qs *queueSet) goroutineDoneOrBlocked() { qs.counter.Add(-1) } + +func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump { + qs.lock.Lock() + defer qs.lock.Unlock() + d := debug.QueueSetDump{ + Queues: make([]debug.QueueDump, len(qs.queues)), + Waiting: qs.totRequestsWaiting, + Executing: qs.totRequestsExecuting, + } + for i, q := range 
qs.queues { + d.Queues[i] = q.dump(includeRequestDetails) + } + return d +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go index 50bc2f7b4b8..e9c700efcc4 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset_test.go @@ -79,7 +79,7 @@ func exerciseQueueSetUniformScenario(t *testing.T, name string, qs fq.QueueSet, go func(i, j int, uc uniformClient, igr test.Integrator) { for k := 0; k < uc.nCalls; k++ { ClockWait(clk, counter, uc.thinkDuration) - req, idle := qs.StartRequest(context.Background(), uc.hash, fsName, name, []int{i, j, k}) + req, idle := qs.StartRequest(context.Background(), uc.hash, "", fsName, name, []int{i, j, k}) t.Logf("%s: %d, %d, %d got req=%p, idle=%v", clk.Now().Format(nsTimeFmt), i, j, k, req, idle) if req == nil { atomic.AddUint64(&failedCount, 1) @@ -346,7 +346,7 @@ func TestContextCancel(t *testing.T) { qs := qsc.Complete(fq.DispatchingConfig{ConcurrencyLimit: 1}) counter.Add(1) // account for the goroutine running this test ctx1 := context.Background() - req1, _ := qs.StartRequest(ctx1, 1, "fs1", "test", "one") + req1, _ := qs.StartRequest(ctx1, 1, "", "fs1", "test", "one") if req1 == nil { t.Error("Request rejected") return @@ -362,7 +362,7 @@ func TestContextCancel(t *testing.T) { counter.Add(1) cancel2() }() - req2, idle2a := qs.StartRequest(ctx2, 2, "fs2", "test", "two") + req2, idle2a := qs.StartRequest(ctx2, 2, "", "fs2", "test", "two") if idle2a { t.Error("2nd StartRequest returned idle") } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go index 1facc701d9e..1bcb8cfb32c 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go +++ 
b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go @@ -20,15 +20,20 @@ import ( "context" "time" + genericrequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise" ) // request is a temporary container for "requests" with additional // tracking fields required for the functionality FQScheduler type request struct { - qs *queueSet - fsName string - ctx context.Context + ctx context.Context + + qs *queueSet + + flowDistinguisher string + fsName string // The relevant queue. Is nil if this request did not go through // a queue. @@ -94,3 +99,27 @@ func (q *queue) GetVirtualFinish(J int, G float64) float64 { jg := float64(J+1) * float64(G) return jg + q.virtualStart } + +func (q *queue) dump(includeDetails bool) debug.QueueDump { + digest := make([]debug.RequestDump, len(q.requests)) + for i, r := range q.requests { + // dump requests. + digest[i].MatchedFlowSchema = r.fsName + digest[i].FlowDistinguisher = r.flowDistinguisher + digest[i].ArriveTime = r.arrivalTime + digest[i].StartTime = r.startTime + if includeDetails { + userInfo, _ := genericrequest.UserFrom(r.ctx) + digest[i].UserName = userInfo.GetName() + requestInfo, ok := genericrequest.RequestInfoFrom(r.ctx) + if ok { + digest[i].RequestInfo = *requestInfo + } + } + } + return debug.QueueDump{ + VirtualStart: q.virtualStart, + Requests: digest, + ExecutingRequests: q.requestsExecuting, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/no-restraint.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/no-restraint.go index 14504f20179..72e7f5706ef 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/no-restraint.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/testing/no-restraint.go @@ -19,6 +19,7 @@ package testing import ( "context" + "k8s.io/apiserver/pkg/util/flowcontrol/debug" fq 
"k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing" ) @@ -53,10 +54,14 @@ func (noRestraint) IsIdle() bool { return false } -func (noRestraint) StartRequest(ctx context.Context, hashValue uint64, fsName string, descr1, descr2 interface{}) (fq.Request, bool) { +func (noRestraint) StartRequest(ctx context.Context, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}) (fq.Request, bool) { return noRestraintRequest{}, false } +func (noRestraint) Dump(bool) debug.QueueSetDump { + return debug.QueueSetDump{} +} + func (noRestraintRequest) Finish(execute func()) (idle bool) { execute() return false diff --git a/vendor/knative.dev/pkg/OWNERS_ALIASES b/vendor/knative.dev/pkg/OWNERS_ALIASES index 35fc9020582..98bd1203f06 100644 --- a/vendor/knative.dev/pkg/OWNERS_ALIASES +++ b/vendor/knative.dev/pkg/OWNERS_ALIASES @@ -8,14 +8,20 @@ aliases: - mattmoor - vaikas - n3wscott + apis-reviewers: + - whaught apis-duck-approvers: - mattmoor - vaikas + apis-duck-reviewers: + - whaught codegen-approvers: - mattmoor - n3wscott + codegen-reviewers: + - whaught configmap-approvers: - mattmoor @@ -28,6 +34,8 @@ aliases: - mattmoor - tcnghia - vagababov + controller-reviewers: + - whaught kmeta-approvers: - mattmoor @@ -65,4 +73,5 @@ aliases: - dprotaso - mattmoor - tcnghia - + webhook-reviewers: + - whaught diff --git a/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go b/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go index 45a06ab921b..657c30e3fa8 100644 --- a/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go +++ b/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apixclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -54,7 
+54,7 @@ func NewMigrator(d dynamic.Interface, a apixclient.Interface) *Migrator { // Finally the migrator will update the CRD's status and drop older storage // versions func (m *Migrator) Migrate(ctx context.Context, gr schema.GroupResource) error { - crdClient := m.apixClient.ApiextensionsV1beta1().CustomResourceDefinitions() + crdClient := m.apixClient.ApiextensionsV1().CustomResourceDefinitions() crd, err := crdClient.Get(gr.String(), metav1.GetOptions{}) if err != nil { @@ -63,6 +63,10 @@ func (m *Migrator) Migrate(ctx context.Context, gr schema.GroupResource) error { version := storageVersion(crd) + if version == "" { + return fmt.Errorf("unable to determine storage version for %s", gr) + } + if err := m.migrateResources(ctx, gr.WithVersion(version)); err != nil { return err } @@ -112,9 +116,5 @@ func storageVersion(crd *apix.CustomResourceDefinition) string { } } - if version == "" { - version = crd.Spec.Version - } - return version } diff --git a/vendor/knative.dev/pkg/apiextensions/storageversion/migrator_test.go b/vendor/knative.dev/pkg/apiextensions/storageversion/migrator_test.go index ff73b3b165e..90c37bfff16 100644 --- a/vendor/knative.dev/pkg/apiextensions/storageversion/migrator_test.go +++ b/vendor/knative.dev/pkg/apiextensions/storageversion/migrator_test.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apixFake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS index a25420ebc0d..1227b2b9e8b 100644 --- a/vendor/knative.dev/pkg/apis/OWNERS +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -2,3 +2,6 @@ approvers: - apis-approvers + +reviewers: +- apis-reviewers diff --git 
a/vendor/knative.dev/pkg/apis/condition_set.go b/vendor/knative.dev/pkg/apis/condition_set.go index 8d02fec485b..e354167b9ef 100644 --- a/vendor/knative.dev/pkg/apis/condition_set.go +++ b/vendor/knative.dev/pkg/apis/condition_set.go @@ -184,25 +184,25 @@ func (r conditionsImpl) GetCondition(t ConditionType) *Condition { // SetCondition sets or updates the Condition on Conditions for Condition.Type. // If there is an update, Conditions are stored back sorted. -func (r conditionsImpl) SetCondition(new Condition) { +func (r conditionsImpl) SetCondition(cond Condition) { if r.accessor == nil { return } - t := new.Type + t := cond.Type var conditions Conditions for _, c := range r.accessor.GetConditions() { if c.Type != t { conditions = append(conditions, c) } else { // If we'd only update the LastTransitionTime, then return. - new.LastTransitionTime = c.LastTransitionTime - if reflect.DeepEqual(&new, &c) { + cond.LastTransitionTime = c.LastTransitionTime + if reflect.DeepEqual(cond, c) { return } } } - new.LastTransitionTime = VolatileTime{Inner: metav1.NewTime(time.Now())} - conditions = append(conditions, new) + cond.LastTransitionTime = VolatileTime{Inner: metav1.NewTime(time.Now())} + conditions = append(conditions, cond) // Sorted for convenience of the consumer, i.e. kubectl. sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type }) r.accessor.SetConditions(conditions) diff --git a/vendor/knative.dev/pkg/apis/deprecated.go b/vendor/knative.dev/pkg/apis/deprecated.go index b930fbb91a2..fc9bca5f1c0 100644 --- a/vendor/knative.dev/pkg/apis/deprecated.go +++ b/vendor/knative.dev/pkg/apis/deprecated.go @@ -36,7 +36,7 @@ func CheckDeprecated(ctx context.Context, obj interface{}) *FieldError { // CheckDeprecated checks whether the provided named deprecated fields // are set in a context where deprecation is disallowed. // This is a json shallow check. We will recursively check inlined structs. 
-func CheckDeprecatedUpdate(ctx context.Context, obj interface{}, original interface{}) *FieldError { +func CheckDeprecatedUpdate(ctx context.Context, obj, original interface{}) *FieldError { if IsDeprecatedAllowed(ctx) { return nil } diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS index ad4d83c51e4..8df611ba004 100644 --- a/vendor/knative.dev/pkg/apis/duck/OWNERS +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -2,3 +2,6 @@ approvers: - apis-duck-approvers + +reviewers: +- apis-duck-reviewers diff --git a/vendor/knative.dev/pkg/apis/duck/register.go b/vendor/knative.dev/pkg/apis/duck/register.go index d10adc21ce7..d84cd49d18b 100644 --- a/vendor/knative.dev/pkg/apis/duck/register.go +++ b/vendor/knative.dev/pkg/apis/duck/register.go @@ -18,4 +18,12 @@ package duck const ( GroupName = "duck.knative.dev" + + // AddressableDuckVersionLabel is the label we use to declare + // that a type conforms to the Addressable duck type. + AddressableDuckVersionLabel = "duck.knative.dev/addressable" + + // SourceDuckVersionLabel is the label we use to declare + // that a type conforms to the Source duck type. 
+ SourceDuckVersionLabel = "duck.knative.dev/source" ) diff --git a/vendor/knative.dev/pkg/apis/duck/typed_test.go b/vendor/knative.dev/pkg/apis/duck/typed_test.go index 08b693c78e3..4ef97e170e9 100644 --- a/vendor/knative.dev/pkg/apis/duck/typed_test.go +++ b/vendor/knative.dev/pkg/apis/duck/typed_test.go @@ -109,8 +109,8 @@ func TestInvalidResource(t *testing.T) { _, _, got := tif.Get(SchemeGroupVersion.WithResource("resources")) - if got != testErr { - t.Errorf("Error = %v, want: %v", got, testErr) + if got != errTest { + t.Errorf("Error = %v, want: %v", got, errTest) } } @@ -296,7 +296,7 @@ func (bo *badObject) DeepCopyObject() runtime.Object { return &badObject{} } -var testErr = errors.New("failed to get list") +var errTest = errors.New("failed to get list") type invalidResourceClient struct { *fake.FakeDynamicClient @@ -311,5 +311,5 @@ type invalidResource struct { } func (*invalidResource) List(options metav1.ListOptions) (*unstructured.UnstructuredList, error) { - return nil, testErr + return nil, errTest } diff --git a/vendor/knative.dev/pkg/apis/field_error.go b/vendor/knative.dev/pkg/apis/field_error.go index 3cedeecbbb5..cc353b58bf5 100644 --- a/vendor/knative.dev/pkg/apis/field_error.go +++ b/vendor/knative.dev/pkg/apis/field_error.go @@ -204,11 +204,12 @@ func flatten(path []string) string { var newPath []string for _, part := range path { for _, p := range strings.Split(part, ".") { - if p == CurrentField { + switch { + case p == CurrentField: continue - } else if len(newPath) > 0 && isIndex(p) { + case len(newPath) > 0 && isIndex(p): newPath[len(newPath)-1] += p - } else { + default: newPath = append(newPath, p) } } @@ -383,7 +384,7 @@ func ErrOutOfBoundsValue(value, lower, upper interface{}, fieldPath string) *Fie func CheckDisallowedFields(request, maskedRequest interface{}) *FieldError { if disallowed, err := kmp.CompareSetFields(request, maskedRequest); err != nil { return &FieldError{ - Message: fmt.Sprintf("Internal Error"), + Message: 
"Internal Error", Paths: []string{CurrentField}, } } else if len(disallowed) > 0 { diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go new file mode 100644 index 00000000000..9c7757d3e46 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package customresourcedefinition + +import ( + context "context" + + v1 "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1" + factory "knative.dev/pkg/client/injection/apiextensions/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Apiextensions().V1().CustomResourceDefinitions() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.CustomResourceDefinitionInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1.CustomResourceDefinitionInformer from context.") + } + return untyped.(v1.CustomResourceDefinitionInformer) +} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/fake/fake.go b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/fake/fake.go new file mode 100644 index 00000000000..4eb1a3cb30f --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + context "context" + + customresourcedefinition "knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition" + fake "knative.dev/pkg/client/injection/apiextensions/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = customresourcedefinition.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Apiextensions().V1().CustomResourceDefinitions() + return context.WithValue(ctx, customresourcedefinition.Key{}, inf), inf.Informer() +} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/controller.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/controller.go new file mode 100644 index 00000000000..315985b6b80 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/controller.go @@ -0,0 +1,139 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package customresourcedefinition + +import ( + context "context" + fmt "fmt" + reflect "reflect" + strings "strings" + + corev1 "k8s.io/api/core/v1" + clientsetscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + record "k8s.io/client-go/tools/record" + client "knative.dev/pkg/client/injection/apiextensions/client" + customresourcedefinition "knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition" + kubeclient "knative.dev/pkg/client/injection/kube/client" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" +) + +const ( + defaultControllerAgentName = "customresourcedefinition-controller" + defaultFinalizerName = "customresourcedefinitions.apiextensions.k8s.io" +) + +// NewImpl returns a controller.Impl that handles queuing and feeding work from +// the queue through an implementation of controller.Reconciler, delegating to +// the provided Interface and optional Finalizer methods. OptionsFn is used to return +// controller.Options to be used but the internal reconciler. +func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl { + logger := logging.FromContext(ctx) + + // Check the options function input. It should be 0 or 1. 
+ if len(optionsFns) > 1 { + logger.Fatalf("up to one options function is supported, found %d", len(optionsFns)) + } + + customresourcedefinitionInformer := customresourcedefinition.Get(ctx) + + lister := customresourcedefinitionInformer.Lister() + + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client.Get(ctx), + Lister: lister, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + t := reflect.TypeOf(r).Elem() + queueName := fmt.Sprintf("%s.%s", strings.ReplaceAll(t.PkgPath(), "/", "-"), t.Name()) + + impl := controller.NewImpl(rec, logger, queueName) + agentName := defaultControllerAgentName + + // Pass impl to the options. Save any optional results. 
+ for _, fn := range optionsFns { + opts := fn(impl) + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.AgentName != "" { + agentName = opts.AgentName + } + } + + rec.Recorder = createRecorder(ctx, agentName) + + return impl +} + +func createRecorder(ctx context.Context, agentName string) record.EventRecorder { + logger := logging.FromContext(ctx) + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + // Create event broadcaster + logger.Debug("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + watches := []watch.Interface{ + eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), + eventBroadcaster.StartRecordingToSink( + &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}), + } + recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName}) + go func() { + <-ctx.Done() + for _, w := range watches { + w.Stop() + } + }() + } + + return recorder +} + +func init() { + clientsetscheme.AddToScheme(scheme.Scheme) +} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go new file mode 100644 index 00000000000..e0cd0fc1e70 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go @@ -0,0 +1,430 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package customresourcedefinition + +import ( + context "context" + json "encoding/json" + fmt "fmt" + reflect "reflect" + + zap "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1" + equality "k8s.io/apimachinery/pkg/api/equality" + errors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + sets "k8s.io/apimachinery/pkg/util/sets" + cache "k8s.io/client-go/tools/cache" + record "k8s.io/client-go/tools/record" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" +) + +// Interface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1.CustomResourceDefinition. +type Interface interface { + // ReconcileKind implements custom logic to reconcile v1.CustomResourceDefinition. Any changes + // to the objects .Status or .Finalizers will be propagated to the stored + // object. It is recommended that implementors do not call any update calls + // for the Kind inside of ReconcileKind, it is the responsibility of the calling + // controller to propagate those properties. The resource passed to ReconcileKind + // will always have an empty deletion timestamp. 
+ ReconcileKind(ctx context.Context, o *v1.CustomResourceDefinition) reconciler.Event +} + +// Finalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1.CustomResourceDefinition. +type Finalizer interface { + // FinalizeKind implements custom logic to finalize v1.CustomResourceDefinition. Any changes + // to the objects .Status or .Finalizers will be ignored. Returning a nil or + // Normal type reconciler.Event will allow the finalizer to be deleted on + // the resource. The resource passed to FinalizeKind will always have a set + // deletion timestamp. + FinalizeKind(ctx context.Context, o *v1.CustomResourceDefinition) reconciler.Event +} + +// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1.CustomResourceDefinition if they want to process resources for which +// they are not the leader. +type ReadOnlyInterface interface { + // ObserveKind implements logic to observe v1.CustomResourceDefinition. + // This method should not write to the API. + ObserveKind(ctx context.Context, o *v1.CustomResourceDefinition) reconciler.Event +} + +// ReadOnlyFinalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1.CustomResourceDefinition if they want to process tombstoned resources +// even when they are not the leader. Due to the nature of how finalizers are handled +// there are no guarantees that this will be called. +type ReadOnlyFinalizer interface { + // ObserveFinalizeKind implements custom logic to observe the final state of v1.CustomResourceDefinition. + // This method should not write to the API. + ObserveFinalizeKind(ctx context.Context, o *v1.CustomResourceDefinition) reconciler.Event +} + +// reconcilerImpl implements controller.Reconciler for v1.CustomResourceDefinition resources. 
+type reconcilerImpl struct { + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + reconciler.LeaderAwareFuncs + + // Client is used to write back status updates. + Client clientset.Interface + + // Listers index properties about resources + Lister apiextensionsv1.CustomResourceDefinitionLister + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // configStore allows for decorating a context with config maps. + // +optional + configStore reconciler.ConfigStore + + // reconciler is the implementation of the business logic of the resource. + reconciler Interface + + // finalizerName is the name of the finalizer to reconcile. + finalizerName string +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*reconcilerImpl)(nil) + +// Check that our generated Reconciler is always LeaderAware. +var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) + +func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client clientset.Interface, lister apiextensionsv1.CustomResourceDefinitionLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { + // Check the options function input. It should be 0 or 1. + if len(options) > 1 { + logger.Fatalf("up to one options struct is supported, found %d", len(options)) + } + + // Fail fast when users inadvertently implement the other LeaderAware interface. + // For the typed reconcilers, Promote shouldn't take any arguments. + if _, ok := r.(reconciler.LeaderAware); ok { + logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) + } + // TODO: Consider validating when folks implement ReadOnlyFinalizer, but not Finalizer. 
+ + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client, + Lister: lister, + Recorder: recorder, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + for _, opts := range options { + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + } + + return rec +} + +// Reconcile implements controller.Reconciler +func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorf("invalid resource key: %s", key) + return nil + } + // Establish whether we are the leader for use below. + isLeader := r.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) + roi, isROI := r.reconciler.(ReadOnlyInterface) + rof, isROF := r.reconciler.(ReadOnlyFinalizer) + if !isLeader && !isROI && !isROF { + // If we are not the leader, and we don't implement either ReadOnly + // interface, then take a fast-path out. + return nil + } + + // If configStore is set, attach the frozen configuration to the context. + if r.configStore != nil { + ctx = r.configStore.ToContext(ctx) + } + + // Add the recorder to context. + ctx = controller.WithEventRecorder(ctx, r.Recorder) + + // Get the resource with this namespace/name. 
+ + getter := r.Lister + + original, err := getter.Get(name) + + if errors.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. + logger.Debugf("resource %q no longer exists", key) + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + resource := original.DeepCopy() + + var reconcileEvent reconciler.Event + if resource.GetDeletionTimestamp().IsZero() { + if isLeader { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ReconcileKind")) + + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return fmt.Errorf("failed to set finalizers: %w", err) + } + + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) + + } else if isROI { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ObserveKind")) + + // Observe any changes to this resource, since we are not the leader. + reconcileEvent = roi.ObserveKind(ctx, resource) + } + } else if fin, ok := r.reconciler.(Finalizer); isLeader && ok { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "FinalizeKind")) + + // For finalizing reconcilers, if this resource being marked for deletion + // and reconciled cleanly (nil or normal event), remove the finalizer. + reconcileEvent = fin.FinalizeKind(ctx, resource) + if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { + return fmt.Errorf("failed to clear finalizers: %w", err) + } + } else if !isLeader && isROF { + // Append the target method to the logger. 
+ logger = logger.With(zap.String("targetMethod", "ObserveFinalizeKind")) + + // For finalizing reconcilers, just observe when we aren't the leader. + reconcileEvent = rof.ObserveFinalizeKind(ctx, resource) + } + + // Synchronize the status. + if equality.Semantic.DeepEqual(original.Status, resource.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the injectionInformer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if !isLeader { + logger.Warn("Saw status changes when we aren't the leader!") + // TODO: Consider logging the diff at Debug? + } else if err = r.updateStatus(original, resource); err != nil { + logger.Warnw("Failed to update resource status", zap.Error(err)) + r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for %q: %v", resource.Name, err) + return err + } + + // Report the reconciler event, if any. + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) + r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...) 
+ + // the event was wrapped inside an error, consider the reconciliation as failed + if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { + return reconcileEvent + } + return nil + } + + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + return reconcileEvent + } + + return nil +} + +func (r *reconcilerImpl) updateStatus(existing *v1.CustomResourceDefinition, desired *v1.CustomResourceDefinition) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + + getter := r.Client.ApiextensionsV1().CustomResourceDefinitions() + + existing, err = getter.Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + + updater := r.Client.ApiextensionsV1().CustomResourceDefinitions() + + _, err = updater.UpdateStatus(existing) + return err + }) +} + +// updateFinalizersFiltered will update the Finalizers of the resource. +// TODO: this method could be generic and sync all finalizers. For now it only +// updates defaultFinalizerName or its override. +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) { + + getter := r.Lister + + actual, err := getter.Get(resource.Name) + if err != nil { + return resource, err + } + + // Don't modify the informers copy. + existing := actual.DeepCopy() + + var finalizers []string + + // If there's nothing to update, just return. + existingFinalizers := sets.NewString(existing.Finalizers...) 
+ desiredFinalizers := sets.NewString(resource.Finalizers...) + + if desiredFinalizers.Has(r.finalizerName) { + if existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Add the finalizer. + finalizers = append(existing.Finalizers, r.finalizerName) + } else { + if !existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Remove the finalizer. + existingFinalizers.Delete(r.finalizerName) + finalizers = existingFinalizers.List() + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": finalizers, + "resourceVersion": existing.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return resource, err + } + + patcher := r.Client.ApiextensionsV1().CustomResourceDefinitions() + + resourceName := resource.Name + resource, err = patcher.Patch(resourceName, types.MergePatchType, patch) + if err != nil { + r.Recorder.Eventf(resource, corev1.EventTypeWarning, "FinalizerUpdateFailed", + "Failed to update finalizers for %q: %v", resourceName, err) + } else { + r.Recorder.Eventf(resource, corev1.EventTypeNormal, "FinalizerUpdate", + "Updated %q finalizers", resource.GetName()) + } + return resource, err +} + +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + + finalizers := sets.NewString(resource.Finalizers...) + + // If this resource is not being deleted, mark the finalizer. + if resource.GetDeletionTimestamp().IsZero() { + finalizers.Insert(r.finalizerName) + } + + resource.Finalizers = finalizers.List() + + // Synchronize the finalizers filtered by r.finalizerName. 
+ return r.updateFinalizersFiltered(ctx, resource) +} + +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.CustomResourceDefinition, reconcileEvent reconciler.Event) (*v1.CustomResourceDefinition, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + if resource.GetDeletionTimestamp().IsZero() { + return resource, nil + } + + finalizers := sets.NewString(resource.Finalizers...) + + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + if event.EventType == corev1.EventTypeNormal { + finalizers.Delete(r.finalizerName) + } + } + } else { + finalizers.Delete(r.finalizerName) + } + + resource.Finalizers = finalizers.List() + + // Synchronize the finalizers filtered by r.finalizerName. + return r.updateFinalizersFiltered(ctx, resource) +} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/stub/controller.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/stub/controller.go new file mode 100644 index 00000000000..4ac22d3e797 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/stub/controller.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package customresourcedefinition + +import ( + context "context" + + customresourcedefinition "knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition" + v1customresourcedefinition "knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition" + configmap "knative.dev/pkg/configmap" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" +) + +// TODO: PLEASE COPY AND MODIFY THIS FILE AS A STARTING POINT + +// NewController creates a Reconciler for CustomResourceDefinition and returns the result of NewImpl. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + logger := logging.FromContext(ctx) + + customresourcedefinitionInformer := customresourcedefinition.Get(ctx) + + // TODO: setup additional informers here. + + r := &Reconciler{} + impl := v1customresourcedefinition.NewImpl(ctx, r) + + logger.Info("Setting up event handlers.") + + customresourcedefinitionInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + // TODO: add additional informer event handlers here. + + return impl +} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/stub/reconciler.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/stub/reconciler.go new file mode 100644 index 00000000000..16ae428b8ab --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/stub/reconciler.go @@ -0,0 +1,87 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package customresourcedefinition + +import ( + context "context" + + v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + customresourcedefinition "knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition" + reconciler "knative.dev/pkg/reconciler" +) + +// TODO: PLEASE COPY AND MODIFY THIS FILE AS A STARTING POINT + +// newReconciledNormal makes a new reconciler event with event type Normal, and +// reason CustomResourceDefinitionReconciled. +func newReconciledNormal(namespace, name string) reconciler.Event { + return reconciler.NewEvent(v1.EventTypeNormal, "CustomResourceDefinitionReconciled", "CustomResourceDefinition reconciled: \"%s/%s\"", namespace, name) +} + +// Reconciler implements controller.Reconciler for CustomResourceDefinition resources. +type Reconciler struct { + // TODO: add additional requirements here. +} + +// Check that our Reconciler implements Interface +var _ customresourcedefinition.Interface = (*Reconciler)(nil) + +// Optionally check that our Reconciler implements Finalizer +//var _ customresourcedefinition.Finalizer = (*Reconciler)(nil) + +// Optionally check that our Reconciler implements ReadOnlyInterface +// Implement this to observe resources even when we are not the leader. 
+//var _ customresourcedefinition.ReadOnlyInterface = (*Reconciler)(nil) + +// Optionally check that our Reconciler implements ReadOnlyFinalizer +// Implement this to observe tombstoned resources even when we are not +// the leader (best effort). +//var _ customresourcedefinition.ReadOnlyFinalizer = (*Reconciler)(nil) + +// ReconcileKind implements Interface.ReconcileKind. +func (r *Reconciler) ReconcileKind(ctx context.Context, o *apiextensionsv1.CustomResourceDefinition) reconciler.Event { + // TODO: use this if the resource implements InitializeConditions. + // o.Status.InitializeConditions() + + // TODO: add custom reconciliation logic here. + + // TODO: use this if the object has .status.ObservedGeneration. + // o.Status.ObservedGeneration = o.Generation + return newReconciledNormal(o.Namespace, o.Name) +} + +// Optionally, use FinalizeKind to add finalizers. FinalizeKind will be called +// when the resource is deleted. +//func (r *Reconciler) FinalizeKind(ctx context.Context, o *apiextensionsv1.CustomResourceDefinition) reconciler.Event { +// // TODO: add custom finalization logic here. +// return nil +//} + +// Optionally, use ObserveKind to observe the resource when we are not the leader. +// func (r *Reconciler) ObserveKind(ctx context.Context, o *apiextensionsv1.CustomResourceDefinition) reconciler.Event { +// // TODO: add custom observation logic here. +// return nil +// } + +// Optionally, use ObserveFinalizeKind to observe resources being finalized when we are not the leader. +//func (r *Reconciler) ObserveFinalizeKind(ctx context.Context, o *apiextensionsv1.CustomResourceDefinition) reconciler.Event { +// // TODO: add custom observation logic here. 
+// return nil +//} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/controller.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/controller.go index 4f20eeb03e8..548b8e16078 100644 --- a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/controller.go +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/controller.go @@ -26,6 +26,8 @@ import ( corev1 "k8s.io/api/core/v1" clientsetscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" v1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -35,6 +37,7 @@ import ( kubeclient "knative.dev/pkg/client/injection/kube/client" controller "knative.dev/pkg/controller" logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" ) const ( @@ -56,9 +59,27 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF customresourcedefinitionInformer := customresourcedefinition.Get(ctx) + lister := customresourcedefinitionInformer.Lister() + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. 
+ enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, Client: client.Get(ctx), - Lister: customresourcedefinitionInformer.Lister(), + Lister: lister, reconciler: r, finalizerName: defaultFinalizerName, } diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/reconciler.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/reconciler.go index ae55b3e3228..46db0ef9855 100644 --- a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/reconciler.go +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/reconciler.go @@ -21,6 +21,7 @@ package customresourcedefinition import ( context "context" json "encoding/json" + fmt "fmt" reflect "reflect" zap "go.uber.org/zap" @@ -31,6 +32,7 @@ import ( equality "k8s.io/apimachinery/pkg/api/equality" errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" sets "k8s.io/apimachinery/pkg/util/sets" cache "k8s.io/client-go/tools/cache" @@ -63,8 +65,30 @@ type Finalizer interface { FinalizeKind(ctx context.Context, o *v1beta1.CustomResourceDefinition) reconciler.Event } +// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1beta1.CustomResourceDefinition if they want to process resources for which +// they are not the leader. +type ReadOnlyInterface interface { + // ObserveKind implements logic to observe v1beta1.CustomResourceDefinition. + // This method should not write to the API. 
+ ObserveKind(ctx context.Context, o *v1beta1.CustomResourceDefinition) reconciler.Event +} + +// ReadOnlyFinalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1beta1.CustomResourceDefinition if they want to process tombstoned resources +// even when they are not the leader. Due to the nature of how finalizers are handled +// there are no guarantees that this will be called. +type ReadOnlyFinalizer interface { + // ObserveFinalizeKind implements custom logic to observe the final state of v1beta1.CustomResourceDefinition. + // This method should not write to the API. + ObserveFinalizeKind(ctx context.Context, o *v1beta1.CustomResourceDefinition) reconciler.Event +} + // reconcilerImpl implements controller.Reconciler for v1beta1.CustomResourceDefinition resources. type reconcilerImpl struct { + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + reconciler.LeaderAwareFuncs + // Client is used to write back status updates. Client clientset.Interface @@ -89,13 +113,39 @@ type reconcilerImpl struct { // Check that our Reconciler implements controller.Reconciler var _ controller.Reconciler = (*reconcilerImpl)(nil) +// Check that our generated Reconciler is always LeaderAware. +var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) + func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client clientset.Interface, lister apiextensionsv1beta1.CustomResourceDefinitionLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { // Check the options function input. It should be 0 or 1. if len(options) > 1 { logger.Fatalf("up to one options struct is supported, found %d", len(options)) } + // Fail fast when users inadvertently implement the other LeaderAware interface. + // For the typed reconcilers, Promote shouldn't take any arguments. + if _, ok := r.(reconciler.LeaderAware); ok { + logger.Fatalf("%T implements the incorrect LeaderAware interface. 
Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) + } + // TODO: Consider validating when folks implement ReadOnlyFinalizer, but not Finalizer. + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, Client: client, Lister: lister, Recorder: recorder, @@ -119,6 +169,25 @@ func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client client func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { logger := logging.FromContext(ctx) + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorf("invalid resource key: %s", key) + return nil + } + // Establish whether we are the leader for use below. + isLeader := r.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) + roi, isROI := r.reconciler.(ReadOnlyInterface) + rof, isROF := r.reconciler.(ReadOnlyFinalizer) + if !isLeader && !isROI && !isROF { + // If we are not the leader, and we don't implement either ReadOnly + // interface, then take a fast-path out. + return nil + } + // If configStore is set, attach the frozen configuration to the context. if r.configStore != nil { ctx = r.configStore.ToContext(ctx) @@ -127,15 +196,6 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // Add the recorder to context. 
ctx = controller.WithEventRecorder(ctx, r.Recorder) - // Convert the namespace/name string into a distinct namespace and name - - _, name, err := cache.SplitMetaNamespaceKey(key) - - if err != nil { - logger.Errorf("invalid resource key: %s", key) - return nil - } - // Get the resource with this namespace/name. getter := r.Lister @@ -155,20 +215,28 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var reconcileEvent reconciler.Event if resource.GetDeletionTimestamp().IsZero() { - // Append the target method to the logger. - logger = logger.With(zap.String("targetMethod", "ReconcileKind")) + if isLeader { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ReconcileKind")) + + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return fmt.Errorf("failed to set finalizers: %w", err) + } - // Set and update the finalizer on resource if r.reconciler - // implements Finalizer. - if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { - logger.Warnw("Failed to set finalizers", zap.Error(err)) - } + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) - // Reconcile this copy of the resource and then write back any status - // updates regardless of whether the reconciliation errored out. - reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) + } else if isROI { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ObserveKind")) - } else if fin, ok := r.reconciler.(Finalizer); ok { + // Observe any changes to this resource, since we are not the leader. 
+ reconcileEvent = roi.ObserveKind(ctx, resource) + } + } else if fin, ok := r.reconciler.(Finalizer); isLeader && ok { // Append the target method to the logger. logger = logger.With(zap.String("targetMethod", "FinalizeKind")) @@ -176,8 +244,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // and reconciled cleanly (nil or normal event), remove the finalizer. reconcileEvent = fin.FinalizeKind(ctx, resource) if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { - logger.Warnw("Failed to clear finalizers", zap.Error(err)) + return fmt.Errorf("failed to clear finalizers: %w", err) } + } else if !isLeader && isROF { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ObserveFinalizeKind")) + + // For finalizing reconcilers, just observe when we aren't the leader. + reconcileEvent = rof.ObserveFinalizeKind(ctx, resource) } // Synchronize the status. @@ -186,6 +260,9 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // This is important because the copy we loaded from the injectionInformer's // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. + } else if !isLeader { + logger.Warn("Saw status changes when we aren't the leader!") + // TODO: Consider logging the diff at Debug? 
} else if err = r.updateStatus(original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/stub/reconciler.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/stub/reconciler.go index be5bd027c6d..fdd7f5a6a59 100644 --- a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/stub/reconciler.go +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1beta1/customresourcedefinition/stub/reconciler.go @@ -46,6 +46,15 @@ var _ customresourcedefinition.Interface = (*Reconciler)(nil) // Optionally check that our Reconciler implements Finalizer //var _ customresourcedefinition.Finalizer = (*Reconciler)(nil) +// Optionally check that our Reconciler implements ReadOnlyInterface +// Implement this to observe resources even when we are not the leader. +//var _ customresourcedefinition.ReadOnlyInterface = (*Reconciler)(nil) + +// Optionally check that our Reconciler implements ReadOnlyFinalizer +// Implement this to observe tombstoned resources even when we are not +// the leader (best effort). +//var _ customresourcedefinition.ReadOnlyFinalizer = (*Reconciler)(nil) + // ReconcileKind implements Interface.ReconcileKind. func (r *Reconciler) ReconcileKind(ctx context.Context, o *v1beta1.CustomResourceDefinition) reconciler.Event { // TODO: use this if the resource implements InitializeConditions. @@ -64,3 +73,15 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, o *v1beta1.CustomResourc // // TODO: add custom finalization logic here. // return nil //} + +// Optionally, use ObserveKind to observe the resource when we are not the leader. 
+// func (r *Reconciler) ObserveKind(ctx context.Context, o *v1beta1.CustomResourceDefinition) reconciler.Event { +// // TODO: add custom observation logic here. +// return nil +// } + +// Optionally, use ObserveFinalizeKind to observe resources being finalized when we are not the leader. +//func (r *Reconciler) ObserveFinalizeKind(ctx context.Context, o *v1beta1.CustomResourceDefinition) reconciler.Event { +// // TODO: add custom observation logic here. +// return nil +//} diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/fake/fake.go b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/fake/fake.go new file mode 100644 index 00000000000..50aa0e11184 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + context "context" + + mutatingwebhookconfiguration "knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration" + fake "knative.dev/pkg/client/injection/kube/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = mutatingwebhookconfiguration.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Admissionregistration().V1().MutatingWebhookConfigurations() + return context.WithValue(ctx, mutatingwebhookconfiguration.Key{}, inf), inf.Informer() +} diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go new file mode 100644 index 00000000000..27b95afa696 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatingwebhookconfiguration/mutatingwebhookconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package mutatingwebhookconfiguration + +import ( + context "context" + + v1 "k8s.io/client-go/informers/admissionregistration/v1" + factory "knative.dev/pkg/client/injection/kube/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Admissionregistration().V1().MutatingWebhookConfigurations() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.MutatingWebhookConfigurationInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/client-go/informers/admissionregistration/v1.MutatingWebhookConfigurationInformer from context.") + } + return untyped.(v1.MutatingWebhookConfigurationInformer) +} diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/fake/fake.go b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/fake/fake.go new file mode 100644 index 00000000000..de9b29cbee7 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + validatingwebhookconfiguration "knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration" + fake "knative.dev/pkg/client/injection/kube/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = validatingwebhookconfiguration.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Admissionregistration().V1().ValidatingWebhookConfigurations() + return context.WithValue(ctx, validatingwebhookconfiguration.Key{}, inf), inf.Informer() +} diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go new file mode 100644 index 00000000000..7cc5ac316ea --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration/validatingwebhookconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package validatingwebhookconfiguration + +import ( + context "context" + + v1 "k8s.io/client-go/informers/admissionregistration/v1" + factory "knative.dev/pkg/client/injection/kube/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Admissionregistration().V1().ValidatingWebhookConfigurations() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1.ValidatingWebhookConfigurationInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/client-go/informers/admissionregistration/v1.ValidatingWebhookConfigurationInformer from context.") + } + return untyped.(v1.ValidatingWebhookConfigurationInformer) +} diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/controller.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/controller.go index 4cee36b435e..a21da03bb91 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/controller.go +++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/controller.go @@ -25,6 +25,8 @@ import ( strings "strings" corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" scheme "k8s.io/client-go/kubernetes/scheme" v1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -33,6 +35,7 @@ import ( namespace "knative.dev/pkg/client/injection/kube/informers/core/v1/namespace" controller "knative.dev/pkg/controller" logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" ) const ( @@ -54,9 +57,27 @@ func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsF namespaceInformer := namespace.Get(ctx) + lister := namespaceInformer.Lister() + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. 
+ enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, Client: client.Get(ctx), - Lister: namespaceInformer.Lister(), + Lister: lister, reconciler: r, finalizerName: defaultFinalizerName, } diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go index 4b7c037e310..e7e27c0175c 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go +++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go @@ -21,6 +21,7 @@ package namespace import ( context "context" json "encoding/json" + fmt "fmt" reflect "reflect" zap "go.uber.org/zap" @@ -28,6 +29,7 @@ import ( equality "k8s.io/apimachinery/pkg/api/equality" errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" sets "k8s.io/apimachinery/pkg/util/sets" kubernetes "k8s.io/client-go/kubernetes" @@ -62,8 +64,30 @@ type Finalizer interface { FinalizeKind(ctx context.Context, o *v1.Namespace) reconciler.Event } +// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1.Namespace if they want to process resources for which +// they are not the leader. +type ReadOnlyInterface interface { + // ObserveKind implements logic to observe v1.Namespace. + // This method should not write to the API. + ObserveKind(ctx context.Context, o *v1.Namespace) reconciler.Event +} + +// ReadOnlyFinalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1.Namespace if they want to process tombstoned resources +// even when they are not the leader. Due to the nature of how finalizers are handled +// there are no guarantees that this will be called. 
+type ReadOnlyFinalizer interface { + // ObserveFinalizeKind implements custom logic to observe the final state of v1.Namespace. + // This method should not write to the API. + ObserveFinalizeKind(ctx context.Context, o *v1.Namespace) reconciler.Event +} + // reconcilerImpl implements controller.Reconciler for v1.Namespace resources. type reconcilerImpl struct { + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware + reconciler.LeaderAwareFuncs + // Client is used to write back status updates. Client kubernetes.Interface @@ -88,13 +112,39 @@ type reconcilerImpl struct { // Check that our Reconciler implements controller.Reconciler var _ controller.Reconciler = (*reconcilerImpl)(nil) +// Check that our generated Reconciler is always LeaderAware. +var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) + func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client kubernetes.Interface, lister corev1.NamespaceLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { // Check the options function input. It should be 0 or 1. if len(options) > 1 { logger.Fatalf("up to one options struct is supported, found %d", len(options)) } + // Fail fast when users inadvertently implement the other LeaderAware interface. + // For the typed reconcilers, Promote shouldn't take any arguments. + if _, ok := r.(reconciler.LeaderAware); ok { + logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) + } + // TODO: Consider validating when folks implement ReadOnlyFinalizer, but not Finalizer. 
+ rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, Client: client, Lister: lister, Recorder: recorder, @@ -118,6 +168,25 @@ func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client kubern func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { logger := logging.FromContext(ctx) + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorf("invalid resource key: %s", key) + return nil + } + // Establish whether we are the leader for use below. + isLeader := r.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) + roi, isROI := r.reconciler.(ReadOnlyInterface) + rof, isROF := r.reconciler.(ReadOnlyFinalizer) + if !isLeader && !isROI && !isROF { + // If we are not the leader, and we don't implement either ReadOnly + // interface, then take a fast-path out. + return nil + } + // If configStore is set, attach the frozen configuration to the context. if r.configStore != nil { ctx = r.configStore.ToContext(ctx) @@ -126,15 +195,6 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // Add the recorder to context. ctx = controller.WithEventRecorder(ctx, r.Recorder) - // Convert the namespace/name string into a distinct namespace and name - - _, name, err := cache.SplitMetaNamespaceKey(key) - - if err != nil { - logger.Errorf("invalid resource key: %s", key) - return nil - } - // Get the resource with this namespace/name. 
getter := r.Lister @@ -154,20 +214,28 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { var reconcileEvent reconciler.Event if resource.GetDeletionTimestamp().IsZero() { - // Append the target method to the logger. - logger = logger.With(zap.String("targetMethod", "ReconcileKind")) + if isLeader { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ReconcileKind")) + + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return fmt.Errorf("failed to set finalizers: %w", err) + } - // Set and update the finalizer on resource if r.reconciler - // implements Finalizer. - if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { - logger.Warnw("Failed to set finalizers", zap.Error(err)) - } + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) - // Reconcile this copy of the resource and then write back any status - // updates regardless of whether the reconciliation errored out. - reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) + } else if isROI { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ObserveKind")) - } else if fin, ok := r.reconciler.(Finalizer); ok { + // Observe any changes to this resource, since we are not the leader. + reconcileEvent = roi.ObserveKind(ctx, resource) + } + } else if fin, ok := r.reconciler.(Finalizer); isLeader && ok { // Append the target method to the logger. logger = logger.With(zap.String("targetMethod", "FinalizeKind")) @@ -175,8 +243,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // and reconciled cleanly (nil or normal event), remove the finalizer. 
reconcileEvent = fin.FinalizeKind(ctx, resource) if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { - logger.Warnw("Failed to clear finalizers", zap.Error(err)) + return fmt.Errorf("failed to clear finalizers: %w", err) } + } else if !isLeader && isROF { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ObserveFinalizeKind")) + + // For finalizing reconcilers, just observe when we aren't the leader. + reconcileEvent = rof.ObserveFinalizeKind(ctx, resource) } // Synchronize the status. @@ -185,6 +259,9 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // This is important because the copy we loaded from the injectionInformer's // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. + } else if !isLeader { + logger.Warn("Saw status changes when we aren't the leader!") + // TODO: Consider logging the diff at Debug? } else if err = r.updateStatus(original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/stub/reconciler.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/stub/reconciler.go index 21f78828726..570fe112253 100644 --- a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/stub/reconciler.go +++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/stub/reconciler.go @@ -45,6 +45,15 @@ var _ namespace.Interface = (*Reconciler)(nil) // Optionally check that our Reconciler implements Finalizer //var _ namespace.Finalizer = (*Reconciler)(nil) +// Optionally check that our Reconciler implements ReadOnlyInterface +// Implement this to observe resources even when we are not the leader. 
+//var _ namespace.ReadOnlyInterface = (*Reconciler)(nil)
+
+// Optionally check that our Reconciler implements ReadOnlyFinalizer
+// Implement this to observe tombstoned resources even when we are not
+// the leader (best effort).
+//var _ namespace.ReadOnlyFinalizer = (*Reconciler)(nil)
+
 // ReconcileKind implements Interface.ReconcileKind.
 func (r *Reconciler) ReconcileKind(ctx context.Context, o *v1.Namespace) reconciler.Event {
 	// TODO: use this if the resource implements InitializeConditions.
@@ -63,3 +72,15 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, o *v1.Namespace) reconci
 // // TODO: add custom finalization logic here.
 // return nil
 //}
+
+// Optionally, use ObserveKind to observe the resource when we are not the leader.
+// func (r *Reconciler) ObserveKind(ctx context.Context, o *v1.Namespace) reconciler.Event {
+// // TODO: add custom observation logic here.
+// return nil
+// }
+
+// Optionally, use ObserveFinalizeKind to observe resources being finalized when we are not the leader.
+//func (r *Reconciler) ObserveFinalizeKind(ctx context.Context, o *v1.Namespace) reconciler.Event {
+// // TODO: add custom observation logic here.
+// return nil +//} diff --git a/vendor/knative.dev/pkg/codegen/OWNERS b/vendor/knative.dev/pkg/codegen/OWNERS index 18af7f34388..9053435aa09 100644 --- a/vendor/knative.dev/pkg/codegen/OWNERS +++ b/vendor/knative.dev/pkg/codegen/OWNERS @@ -2,3 +2,6 @@ approvers: - codegen-approvers + +reviewers: +- codegen-reviewers diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go index c54d6f915c4..50f3cb46186 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go @@ -201,7 +201,7 @@ func isKRShaped(tags map[string]map[string]string) bool { if !has { return false } - return vals["krshapedlogic"] == "true" + return vals["krshapedlogic"] != "false" } func isNonNamespaced(tags map[string]map[string]string) bool { diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go index bf3ca04fe82..ac1c9fac321 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_controller.go @@ -142,9 +142,21 @@ func (g *reconcilerControllerGenerator) GenerateType(c *generator.Context, t *ty Package: "context", Name: "Context", }), - "fmtSprintf": c.Universe.Function(types.Name{ - Package: "fmt", - Name: "Sprintf", + "reconcilerLeaderAwareFuncs": c.Universe.Type(types.Name{ + Package: "knative.dev/pkg/reconciler", + Name: "LeaderAwareFuncs", + }), + "reconcilerBucket": c.Universe.Type(types.Name{ + Package: "knative.dev/pkg/reconciler", + Name: "Bucket", + }), + "typesNamespacedName": c.Universe.Type(types.Name{ + Package: "k8s.io/apimachinery/pkg/types", + Name: "NamespacedName", + }), + "labelsEverything": c.Universe.Function(types.Name{ + Package: 
"k8s.io/apimachinery/pkg/labels", + Name: "Everything", }), "stringsReplaceAll": c.Universe.Function(types.Name{ Package: "strings", @@ -154,6 +166,10 @@ func (g *reconcilerControllerGenerator) GenerateType(c *generator.Context, t *ty Package: "reflect", Name: "TypeOf", }), + "fmtSprintf": c.Universe.Function(types.Name{ + Package: "fmt", + Name: "Sprintf", + }), } sw.Do(reconcilerControllerNewImpl, m) @@ -185,9 +201,27 @@ func NewImpl(ctx {{.contextContext|raw}}, r Interface{{if .hasClass}}, classValu {{.type|lowercaseSingular}}Informer := {{.informerGet|raw}}(ctx) + lister := {{.type|lowercaseSingular}}Informer.Lister() + rec := &reconcilerImpl{ + LeaderAwareFuncs: {{.reconcilerLeaderAwareFuncs|raw}}{ + PromoteFunc: func(bkt {{.reconcilerBucket|raw}}, enq func({{.reconcilerBucket|raw}}, {{.typesNamespacedName|raw}})) error { + all, err := lister.List({{.labelsEverything|raw}}()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. 
+ enq(bkt, {{.typesNamespacedName|raw}}{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, Client: {{.clientGet|raw}}(ctx), - Lister: {{.type|lowercaseSingular}}Informer.Lister(), + Lister: lister, reconciler: r, finalizerName: defaultFinalizerName, {{if .hasClass}}classValue: classValue,{{end}} diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go index 647b82fd0ec..b3d3596a153 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go @@ -147,10 +147,35 @@ func (g *reconcilerReconcilerGenerator) GenerateType(c *generator.Context, t *ty Package: "context", Name: "Context", }), + "fmtErrorf": c.Universe.Package("fmt").Function("Errorf"), "reflectDeepEqual": c.Universe.Package("reflect").Function("DeepEqual"), "equalitySemantic": c.Universe.Package("k8s.io/apimachinery/pkg/api/equality").Variable("Semantic"), "jsonMarshal": c.Universe.Package("encoding/json").Function("Marshal"), "typesMergePatchType": c.Universe.Package("k8s.io/apimachinery/pkg/types").Constant("MergePatchType"), + "syncRWMutex": c.Universe.Type(types.Name{ + Package: "sync", + Name: "RWMutex", + }), + "reconcilerLeaderAware": c.Universe.Type(types.Name{ + Package: "knative.dev/pkg/reconciler", + Name: "LeaderAware", + }), + "reconcilerLeaderAwareFuncs": c.Universe.Type(types.Name{ + Package: "knative.dev/pkg/reconciler", + Name: "LeaderAwareFuncs", + }), + "reconcilerBucket": c.Universe.Type(types.Name{ + Package: "knative.dev/pkg/reconciler", + Name: "Bucket", + }), + "typesNamespacedName": c.Universe.Type(types.Name{ + Package: "k8s.io/apimachinery/pkg/types", + Name: "NamespacedName", + }), + "labelsEverything": c.Universe.Function(types.Name{ + Package: "k8s.io/apimachinery/pkg/labels", + Name: 
"Everything", + }), } sw.Do(reconcilerInterfaceFactory, m) @@ -186,8 +211,30 @@ type Finalizer interface { FinalizeKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}} } +// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a +// controller reconciling {{.type|raw}} if they want to process resources for which +// they are not the leader. +type ReadOnlyInterface interface { + // ObserveKind implements logic to observe {{.type|raw}}. + // This method should not write to the API. + ObserveKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}} +} + +// ReadOnlyFinalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing {{.type|raw}} if they want to process tombstoned resources +// even when they are not the leader. Due to the nature of how finalizers are handled +// there are no guarantees that this will be called. +type ReadOnlyFinalizer interface { + // ObserveFinalizeKind implements custom logic to observe the final state of {{.type|raw}}. + // This method should not write to the API. + ObserveFinalizeKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}} +} + // reconcilerImpl implements controller.Reconciler for {{.type|raw}} resources. type reconcilerImpl struct { + // LeaderAwareFuncs is inlined to help us implement {{.reconcilerLeaderAware|raw}} + {{.reconcilerLeaderAwareFuncs|raw}} + // Client is used to write back status updates. Client {{.clientsetInterface|raw}} @@ -216,6 +263,8 @@ type reconcilerImpl struct { // Check that our Reconciler implements controller.Reconciler var _ controller.Reconciler = (*reconcilerImpl)(nil) +// Check that our generated Reconciler is always LeaderAware. 
+var _ {{.reconcilerLeaderAware|raw}} = (*reconcilerImpl)(nil) ` @@ -226,7 +275,30 @@ func NewReconciler(ctx {{.contextContext|raw}}, logger *{{.zapSugaredLogger|raw} logger.Fatalf("up to one options struct is supported, found %d", len(options)) } + // Fail fast when users inadvertently implement the other LeaderAware interface. + // For the typed reconcilers, Promote shouldn't take any arguments. + if _, ok := r.({{.reconcilerLeaderAware|raw}}); ok { + logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) + } + // TODO: Consider validating when folks implement ReadOnlyFinalizer, but not Finalizer. + rec := &reconcilerImpl{ + LeaderAwareFuncs: {{.reconcilerLeaderAwareFuncs|raw}}{ + PromoteFunc: func(bkt {{.reconcilerBucket|raw}}, enq func({{.reconcilerBucket|raw}}, {{.typesNamespacedName|raw}})) error { + all, err := lister.List({{.labelsEverything|raw}}()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. + enq(bkt, {{.typesNamespacedName|raw}}{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, Client: client, Lister: lister, Recorder: recorder, @@ -253,6 +325,25 @@ var reconcilerImplFactory = ` func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) error { logger := {{.loggingFromContext|raw}}(ctx) + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := {{.cacheSplitMetaNamespaceKey|raw}}(key) + if err != nil { + logger.Errorf("invalid resource key: %s", key) + return nil + } + // Establish whether we are the leader for use below. 
+	isLeader := r.IsLeaderFor({{.typesNamespacedName|raw}}{
+		Namespace: namespace,
+		Name:      name,
+	})
+	roi, isROI := r.reconciler.(ReadOnlyInterface)
+	rof, isROF := r.reconciler.(ReadOnlyFinalizer)
+	if !isLeader && !isROI && !isROF {
+		// If we are not the leader, and we don't implement either ReadOnly
+		// interface, then take a fast-path out.
+		return nil
+	}
+
 	// If configStore is set, attach the frozen configuration to the context.
 	if r.configStore != nil {
 		ctx = r.configStore.ToContext(ctx)
@@ -261,19 +352,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
 	// Add the recorder to context.
 	ctx = {{.controllerWithEventRecorder|raw}}(ctx, r.Recorder)
 
-	// Convert the namespace/name string into a distinct namespace and name
-	{{if .nonNamespaced}}
-	_, name, err := {{.cacheSplitMetaNamespaceKey|raw}}(key)
-	{{else}}
-	namespace, name, err := {{.cacheSplitMetaNamespaceKey|raw}}(key)
-	{{end}}
-	if err != nil {
-		logger.Errorf("invalid resource key: %s", key)
-		return nil
-	}
-
 	// Get the resource with this namespace/name.
-
 	{{if .nonNamespaced}}
 	getter := r.Lister
 	{{else}}
@@ -302,27 +381,34 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
 	var reconcileEvent {{.reconcilerEvent|raw}}
 	if resource.GetDeletionTimestamp().IsZero() {
-		// Append the target method to the logger.
-		logger = logger.With(zap.String("targetMethod", "ReconcileKind"))
-
-		// Set and update the finalizer on resource if r.reconciler
-		// implements Finalizer.
-		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
-			logger.Warnw("Failed to set finalizers", zap.Error(err))
-		}
+		if isLeader {
+			// Append the target method to the logger.
+			logger = logger.With(zap.String("targetMethod", "ReconcileKind"))
+
+			// Set and update the finalizer on resource if r.reconciler
+			// implements Finalizer.
+ if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return {{.fmtErrorf|raw}}("failed to set finalizers: %w", err) + } + {{if .isKRShaped}} + reconciler.PreProcessReconcile(ctx, resource) + {{end}} - {{if .isKRShaped}} - reconciler.PreProcessReconcile(ctx, resource) - {{end}} + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) - // Reconcile this copy of the resource and then write back any status - // updates regardless of whether the reconciliation errored out. - reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) + {{if .isKRShaped}} + reconciler.PostProcessReconcile(ctx, resource, original) + {{end}} + } else if isROI { + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", "ObserveKind")) - {{if .isKRShaped}} - reconciler.PostProcessReconcile(ctx, resource) - {{end}} - } else if fin, ok := r.reconciler.(Finalizer); ok { + // Observe any changes to this resource, since we are not the leader. + reconcileEvent = roi.ObserveKind(ctx, resource) + } + } else if fin, ok := r.reconciler.(Finalizer); isLeader && ok { // Append the target method to the logger. logger = logger.With(zap.String("targetMethod", "FinalizeKind")) @@ -330,8 +416,14 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro // and reconciled cleanly (nil or normal event), remove the finalizer. reconcileEvent = fin.FinalizeKind(ctx, resource) if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { - logger.Warnw("Failed to clear finalizers", zap.Error(err)) + return {{.fmtErrorf|raw}}("failed to clear finalizers: %w", err) } + } else if !isLeader && isROF { + // Append the target method to the logger. 
+ logger = logger.With(zap.String("targetMethod", "ObserveFinalizeKind")) + + // For finalizing reconcilers, just observe when we aren't the leader. + reconcileEvent = rof.ObserveFinalizeKind(ctx, resource) } // Synchronize the status. @@ -340,6 +432,9 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro // This is important because the copy we loaded from the injectionInformer's // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. + } else if !isLeader { + logger.Warn("Saw status changes when we aren't the leader!") + // TODO: Consider logging the diff at Debug? } else if err = r.updateStatus(original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) r.Recorder.Eventf(resource, {{.corev1EventTypeWarning|raw}}, "UpdateFailed", @@ -519,4 +614,5 @@ func (r *reconcilerImpl) clearFinalizer(ctx {{.contextContext|raw}}, resource *{ // Synchronize the finalizers filtered by r.finalizerName. 
return r.updateFinalizersFiltered(ctx, resource) } + ` diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler_stub.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler_stub.go index 211b04080ee..62805e3d2a7 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler_stub.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler_stub.go @@ -77,6 +77,14 @@ func (g *reconcilerReconcilerStubGenerator) GenerateType(c *generator.Context, t Package: g.reconcilerPkg, Name: "Finalizer", }), + "reconcilerReadOnlyInterface": c.Universe.Type(types.Name{ + Package: g.reconcilerPkg, + Name: "ReadOnlyInterface", + }), + "reconcilerReadOnlyFinalizer": c.Universe.Type(types.Name{ + Package: g.reconcilerPkg, + Name: "ReadOnlyFinalizer", + }), "corev1EventTypeNormal": c.Universe.Type(types.Name{ Package: "k8s.io/api/core/v1", Name: "EventTypeNormal", @@ -112,16 +120,26 @@ var _ {{.reconcilerInterface|raw}} = (*Reconciler)(nil) // Optionally check that our Reconciler implements Finalizer //var _ {{.reconcilerFinalizer|raw}} = (*Reconciler)(nil) +// Optionally check that our Reconciler implements ReadOnlyInterface +// Implement this to observe resources even when we are not the leader. +//var _ {{.reconcilerReadOnlyInterface|raw}} = (*Reconciler)(nil) + +// Optionally check that our Reconciler implements ReadOnlyFinalizer +// Implement this to observe tombstoned resources even when we are not +// the leader (best effort). +//var _ {{.reconcilerReadOnlyFinalizer|raw}} = (*Reconciler)(nil) // ReconcileKind implements Interface.ReconcileKind. func (r *Reconciler) ReconcileKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}} { - // TODO: use this if the resource implements InitializeConditions. + {{if not .isKRShaped}}// TODO: use this if the resource implements InitializeConditions. 
	// o.Status.InitializeConditions()
+	{{end}}
 
 	// TODO: add custom reconciliation logic here.
 
+	{{if not .isKRShaped}}
 	// TODO: use this if the object has .status.ObservedGeneration.
-	// o.Status.ObservedGeneration = o.Generation
+	// o.Status.ObservedGeneration = o.Generation{{end}}
 
 	return newReconciledNormal(o.Namespace, o.Name)
 }
@@ -131,4 +149,16 @@ func (r *Reconciler) ReconcileKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}
 // // TODO: add custom finalization logic here.
 // return nil
 //}
+
+// Optionally, use ObserveKind to observe the resource when we are not the leader.
+// func (r *Reconciler) ObserveKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}} {
+// // TODO: add custom observation logic here.
+// return nil
+// }
+
+// Optionally, use ObserveFinalizeKind to observe resources being finalized when we are not the leader.
+//func (r *Reconciler) ObserveFinalizeKind(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}} {
+// // TODO: add custom observation logic here.
+// return nil
+//}
`
diff --git a/vendor/knative.dev/pkg/configmap/manual_watcher.go b/vendor/knative.dev/pkg/configmap/manual_watcher.go
index ad39bb8b9ef..b1eb276339f 100644
--- a/vendor/knative.dev/pkg/configmap/manual_watcher.go
+++ b/vendor/knative.dev/pkg/configmap/manual_watcher.go
@@ -26,9 +26,8 @@ import (
 type ManualWatcher struct {
 	Namespace string
 
-	// Guards mutations to defaultImpl fields
-	m sync.RWMutex
-
+	// Guards observers
+	m         sync.RWMutex
 	observers map[string][]Observer
 }
 
@@ -40,7 +39,7 @@ func (w *ManualWatcher) Watch(name string, o ...Observer) {
 	defer w.m.Unlock()
 
 	if w.observers == nil {
-		w.observers = make(map[string][]Observer, len(o))
+		w.observers = make(map[string][]Observer, 1)
 	}
 	w.observers[name] = append(w.observers[name], o...)
 }
@@ -58,13 +57,8 @@ func (w *ManualWatcher) OnChange(configMap *corev1.ConfigMap) {
 	// Within our namespace, take the lock and see if there are any registered observers.
w.m.RLock() defer w.m.RUnlock() - observers, ok := w.observers[configMap.Name] - if !ok { - return // No observers. - } - // Iterate over the observers and invoke their callbacks. - for _, o := range observers { + for _, o := range w.observers[configMap.Name] { o(configMap) } } diff --git a/vendor/knative.dev/pkg/configmap/manual_watcher_test.go b/vendor/knative.dev/pkg/configmap/manual_watcher_test.go index 72f35ee37a5..dfe0c64e9d9 100644 --- a/vendor/knative.dev/pkg/configmap/manual_watcher_test.go +++ b/vendor/knative.dev/pkg/configmap/manual_watcher_test.go @@ -37,57 +37,43 @@ func TestCallbackInvoked(t *testing.T) { Namespace: "default", } - observer := counter{} - - watcher.Watch("foo", observer.callback) + // Verify empty works as designed. watcher.OnChange(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "foo", }, }) - - if observer.count() == 0 { - t.Errorf("Expected callback to be invoked - got invocations %v", observer.count()) - } -} - -func TestDifferentNamespace(t *testing.T) { - watcher := ManualWatcher{ - Namespace: "default", - } - observer := counter{} watcher.Watch("foo", observer.callback) watcher.OnChange(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "not-default", + Namespace: "default", Name: "foo", }, }) - if observer.count() != 0 { - t.Errorf("Expected callback to be not be invoked - got invocations %v", observer.count()) + if observer.count() == 0 { + t.Errorf("Expected callback to be invoked - got invocations %v", observer.count()) } } -func TestLateRegistration(t *testing.T) { +func TestDifferentNamespace(t *testing.T) { watcher := ManualWatcher{ Namespace: "default", } observer := counter{} + watcher.Watch("foo", observer.callback) watcher.OnChange(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", + Namespace: "not-default", Name: "foo", }, }) - watcher.Watch("foo", observer.callback) - if observer.count() != 0 { t.Errorf("Expected callback to be not be invoked - got 
invocations %v", observer.count()) } diff --git a/vendor/knative.dev/pkg/configmap/parse.go b/vendor/knative.dev/pkg/configmap/parse.go index 210d2114ede..1ef67581810 100644 --- a/vendor/knative.dev/pkg/configmap/parse.go +++ b/vendor/knative.dev/pkg/configmap/parse.go @@ -77,6 +77,20 @@ func AsInt64(key string, target *int64) ParseFunc { } } +// AsUint32 parses the value at key as an uint32 into the target, if it exists. +func AsUint32(key string, target *uint32) ParseFunc { + return func(data map[string]string) error { + if raw, ok := data[key]; ok { + val, err := strconv.ParseUint(raw, 10, 32) + if err != nil { + return fmt.Errorf("failed to parse %q: %w", key, err) + } + *target = uint32(val) + } + return nil + } +} + // AsFloat64 parses the value at key as a float64 into the target, if it exists. func AsFloat64(key string, target *float64) ParseFunc { return func(data map[string]string) error { diff --git a/vendor/knative.dev/pkg/configmap/parse_test.go b/vendor/knative.dev/pkg/configmap/parse_test.go index 477dd1488ee..26fb3332df8 100644 --- a/vendor/knative.dev/pkg/configmap/parse_test.go +++ b/vendor/knative.dev/pkg/configmap/parse_test.go @@ -30,6 +30,7 @@ type testConfig struct { boo bool i32 int32 i64 int64 + u32 uint32 f64 float64 dur time.Duration set sets.String @@ -51,6 +52,7 @@ func TestParse(t *testing.T) { "test-bool": "true", "test-int32": "1", "test-int64": "2", + "test-uint32": "3", "test-float64": "1.0", "test-duration": "1m", "test-set": "a,b,c", @@ -61,6 +63,7 @@ func TestParse(t *testing.T) { boo: true, i32: 1, i64: 2, + u32: 3, f64: 1.0, dur: time.Minute, set: sets.NewString("a", "b", "c"), @@ -106,6 +109,12 @@ func TestParse(t *testing.T) { "test-int64": "foo", }, expectErr: true, + }, { + name: "uint32 error", + data: map[string]string{ + "test-uint32": "foo", + }, + expectErr: true, }, { name: "float64 error", data: map[string]string{ @@ -133,6 +142,7 @@ func TestParse(t *testing.T) { AsBool("test-bool", &test.conf.boo), 
AsInt32("test-int32", &test.conf.i32), AsInt64("test-int64", &test.conf.i64), + AsUint32("test-uint32", &test.conf.u32), AsFloat64("test-float64", &test.conf.f64), AsDuration("test-duration", &test.conf.dur), AsStringSet("test-set", &test.conf.set), diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS index afa22257a26..0b270d53af0 100644 --- a/vendor/knative.dev/pkg/controller/OWNERS +++ b/vendor/knative.dev/pkg/controller/OWNERS @@ -2,3 +2,6 @@ approvers: - controller-approvers + +reviewers: +- controller-reviewers diff --git a/vendor/knative.dev/pkg/controller/controller.go b/vendor/knative.dev/pkg/controller/controller.go index 3981c58204e..6024ec2536e 100644 --- a/vendor/knative.dev/pkg/controller/controller.go +++ b/vendor/knative.dev/pkg/controller/controller.go @@ -36,8 +36,10 @@ import ( "k8s.io/client-go/util/workqueue" "knative.dev/pkg/kmeta" + kle "knative.dev/pkg/leaderelection" "knative.dev/pkg/logging" "knative.dev/pkg/logging/logkey" + "knative.dev/pkg/reconciler" ) const ( @@ -176,6 +178,10 @@ func FilterWithNameAndNamespace(namespace, name string) func(obj interface{}) bo // Impl is our core controller implementation. It handles queuing and feeding work // from the queue to an implementation of Reconciler. type Impl struct { + // Name is the unique name for this controller workqueue within this process. + // This is used for surfacing metrics, and per-controller leader election. + Name string + // Reconciler is the workhorse of this controller, it is fed the keys // from the workqueue to process. Public for testing. 
Reconciler Reconciler @@ -205,7 +211,9 @@ func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string) *Imp } func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl { + logger = logger.Named(workQueueName) return &Impl{ + Name: workQueueName, Reconciler: r, WorkQueue: workqueue.NewNamedRateLimitingQueue( workqueue.DefaultControllerRateLimiter(), @@ -341,6 +349,14 @@ func (c *Impl) EnqueueKey(key types.NamespacedName) { c.logger.Debugf("Adding to queue %s (depth: %d)", safeKey(key), c.WorkQueue.Len()) } +// MaybeEnqueueBucketKey takes a Bucket and namespace/name string and puts it onto the work queue. +func (c *Impl) MaybeEnqueueBucketKey(bkt reconciler.Bucket, key types.NamespacedName) { + if bkt.Has(key) { + c.WorkQueue.Add(key) + c.logger.Debugf("Adding to queue %s (depth: %d)", safeKey(key), c.WorkQueue.Len()) + } +} + // EnqueueKeyAfter takes a namespace/name string and schedules its execution in // the work queue after given delay. func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) { @@ -349,10 +365,12 @@ func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) { } // RunContext starts the controller's worker threads, the number of which is threadiness. +// If the context has been decorated for LeaderElection, then an elector is built and run. // It then blocks until the context is cancelled, at which point it shuts down its // internal work queue and waits for workers to finish processing their current // work items. func (c *Impl) RunContext(ctx context.Context, threadiness int) error { + logger := c.logger defer runtime.HandleCrash() sg := sync.WaitGroup{} defer sg.Wait() @@ -363,8 +381,20 @@ func (c *Impl) RunContext(ctx context.Context, threadiness int) error { } }() + if la, ok := c.Reconciler.(reconciler.LeaderAware); ok { + // Build and execute an elector. 
+ le, err := kle.BuildElector(ctx, la, c.Name, c.MaybeEnqueueBucketKey) + if err != nil { + return err + } + sg.Add(1) + go func() { + defer sg.Done() + le.Run(ctx) + }() + } + // Launch workers to process resources that get enqueued to our workqueue. - logger := c.logger logger.Info("Starting controller and workers") for i := 0; i < threadiness; i++ { sg.Add(1) diff --git a/vendor/knative.dev/pkg/controller/controller_test.go b/vendor/knative.dev/pkg/controller/controller_test.go index 3f857c78d0a..3556af61aa8 100644 --- a/vendor/knative.dev/pkg/controller/controller_test.go +++ b/vendor/knative.dev/pkg/controller/controller_test.go @@ -25,15 +25,22 @@ import ( "time" "github.com/google/go-cmp/cmp" + coordinationv1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + fakekube "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" . "knative.dev/pkg/controller/testing" + "knative.dev/pkg/leaderelection" . "knative.dev/pkg/logging/testing" + "knative.dev/pkg/reconciler" . 
"knative.dev/pkg/testing" ) @@ -707,7 +714,7 @@ func TestEnqueues(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) impl := NewImplWithStats(&NopReconciler{}, TestLogger(t), "Testing", &FakeStatsReporter{}) test.work(impl) @@ -724,7 +731,7 @@ func TestEnqueues(t *testing.T) { } func TestEnqeueAfter(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) impl := NewImplWithStats(&NopReconciler{}, TestLogger(t), "Testing", &FakeStatsReporter{}) impl.EnqueueAfter(&Resource{ ObjectMeta: metav1.ObjectMeta{ @@ -760,7 +767,7 @@ func TestEnqeueAfter(t *testing.T) { } func TestEnqeueKeyAfter(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) impl := NewImplWithStats(&NopReconciler{}, TestLogger(t), "Testing", &FakeStatsReporter{}) impl.EnqueueKeyAfter(types.NamespacedName{Namespace: "waiting", Name: "for"}, 5*time.Second) impl.EnqueueKeyAfter(types.NamespacedName{Namespace: "the", Name: "waterfall"}, 500*time.Millisecond) @@ -782,22 +789,23 @@ func TestEnqeueKeyAfter(t *testing.T) { type CountingReconciler struct { m sync.Mutex - Count int + count int } func (cr *CountingReconciler) Reconcile(context.Context, string) error { cr.m.Lock() defer cr.m.Unlock() - cr.Count++ + cr.count++ return nil } func TestStartAndShutdown(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) r := &CountingReconciler{} impl := NewImplWithStats(r, TestLogger(t), "Testing", &FakeStatsReporter{}) ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) doneCh := make(chan struct{}) go func() { @@ -820,18 +828,160 @@ func TestStartAndShutdown(t *testing.T) { // We expect the work to complete. 
} - if got, want := r.Count, 0; got != want { - t.Errorf("Count = %v, wanted %v", got, want) + if got, want := r.count, 0; got != want { + t.Errorf("count = %v, wanted %v", got, want) + } +} + +type countingLeaderAwareReconciler struct { + reconciler.LeaderAwareFuncs + + m sync.Mutex + count int +} + +var _ reconciler.LeaderAware = (*countingLeaderAwareReconciler)(nil) + +func (cr *countingLeaderAwareReconciler) Reconcile(ctx context.Context, key string) error { + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + if cr.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) { + cr.m.Lock() + defer cr.m.Unlock() + cr.count++ + } + return nil +} + +func TestStartAndShutdownWithLeaderAwareNoElection(t *testing.T) { + t.Cleanup(ClearAll) + promoted := make(chan struct{}) + r := &countingLeaderAwareReconciler{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + close(promoted) + return nil + }, + }, + } + impl := NewImplWithStats(r, TestLogger(t), "Testing", &FakeStatsReporter{}) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + doneCh := make(chan struct{}) + + go func() { + defer close(doneCh) + StartAll(ctx, impl) + }() + + select { + case <-promoted: + // We expect to be promoted immediately, since there is no + // ElectorBuilder attached to the context. + case <-doneCh: + t.Fatal("StartAll finished early.") + case <-time.After(10 * time.Second): + t.Error("Timed out waiting for StartAll.") + } + + cancel() + + select { + case <-time.After(1 * time.Second): + t.Error("Timed out waiting for controller to finish.") + case <-doneCh: + // We expect the work to complete. 
+ } + + if got, want := r.count, 0; got != want { + t.Errorf("reconcile count = %v, wanted %v", got, want) + } +} + +func TestStartAndShutdownWithLeaderAwareWithLostElection(t *testing.T) { + t.Cleanup(ClearAll) + promoted := make(chan struct{}) + r := &countingLeaderAwareReconciler{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + close(promoted) + return nil + }, + }, + } + cc := leaderelection.ComponentConfig{ + Component: "component", + LeaderElect: true, + ResourceLock: "leases", + LeaseDuration: 15 * time.Second, + RenewDeadline: 10 * time.Second, + RetryPeriod: 2 * time.Second, + } + kc := fakekube.NewSimpleClientset( + &coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: "component.testing.00-of-01", + }, + Spec: coordinationv1.LeaseSpec{ + HolderIdentity: ptr.String("not-us"), + LeaseDurationSeconds: ptr.Int32(3000), + AcquireTime: &metav1.MicroTime{Time: time.Now()}, + RenewTime: &metav1.MicroTime{Time: time.Now().Add(3000 * time.Second)}, + }, + }, + ) + + impl := NewImplWithStats(r, TestLogger(t), "Testing", &FakeStatsReporter{}) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + ctx = leaderelection.WithStandardLeaderElectorBuilder(ctx, kc, cc) + doneCh := make(chan struct{}) + + go func() { + defer close(doneCh) + StartAll(ctx, impl) + }() + + select { + case <-promoted: + t.Fatal("Unexpected promotion.") + case <-time.After(3 * time.Second): + // Wait for 3 seconds for good measure. + case <-doneCh: + t.Error("StartAll finished early.") + } + + cancel() + + select { + case <-time.After(1 * time.Second): + t.Error("Timed out waiting for controller to finish.") + case <-doneCh: + // We expect the work to complete. 
+ } + + if got, want := r.count, 0; got != want { + t.Errorf("reconcile count = %v, wanted %v", got, want) } } func TestStartAndShutdownWithWork(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) r := &CountingReconciler{} reporter := &FakeStatsReporter{} impl := NewImplWithStats(r, TestLogger(t), "Testing", reporter) ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) doneCh := make(chan struct{}) impl.EnqueueKey(types.NamespacedName{Namespace: "foo", Name: "bar"}) @@ -856,11 +1006,11 @@ func TestStartAndShutdownWithWork(t *testing.T) { // We expect the work to complete. } - if got, want := r.Count, 1; got != want { - t.Errorf("Count = %v, wanted %v", got, want) + if got, want := r.count, 1; got != want { + t.Errorf("reconcile count = %v, wanted %v", got, want) } if got, want := impl.WorkQueue.NumRequeues(types.NamespacedName{Namespace: "foo", Name: "bar"}), 0; got != want { - t.Errorf("Count = %v, wanted %v", got, want) + t.Errorf("requeues = %v, wanted %v", got, want) } checkStats(t, reporter, 1, 0, 1, trueString) @@ -902,12 +1052,13 @@ func (er *ErrorReconciler) Reconcile(context.Context, string) error { } func TestStartAndShutdownWithErroringWork(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) r := &ErrorReconciler{} reporter := &FakeStatsReporter{} impl := NewImplWithStats(r, TestLogger(t), "Testing", reporter) ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) doneCh := make(chan struct{}) impl.EnqueueKey(types.NamespacedName{Namespace: "", Name: "bar"}) @@ -956,12 +1107,13 @@ func (er *PermanentErrorReconciler) Reconcile(context.Context, string) error { } func TestStartAndShutdownWithPermanentErroringWork(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) r := &PermanentErrorReconciler{} reporter := &FakeStatsReporter{} impl := NewImplWithStats(r, TestLogger(t), "Testing", reporter) ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) doneCh := make(chan struct{}) 
impl.EnqueueKey(types.NamespacedName{Namespace: "foo", Name: "bar"}) @@ -1048,11 +1200,12 @@ func (*dummyStore) List() []interface{} { } func TestImplGlobalResync(t *testing.T) { - defer ClearAll() + t.Cleanup(ClearAll) r := &CountingReconciler{} impl := NewImplWithStats(r, TestLogger(t), "Testing", &FakeStatsReporter{}) ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) doneCh := make(chan struct{}) go func() { @@ -1079,7 +1232,7 @@ func TestImplGlobalResync(t *testing.T) { // We expect the work to complete. } - if want, got := 3, r.Count; want != got { + if want, got := 3, r.count; want != got { t.Errorf("GlobalResync: want = %v, got = %v", want, got) } } @@ -1319,6 +1472,7 @@ func TestRunInformersFinished(t *testing.T) { }() ctx, cancel := context.WithCancel(TestContextWithLogger(t)) + t.Cleanup(cancel) waitInformers, err := RunInformers(ctx.Done(), fi) if err != nil { diff --git a/vendor/knative.dev/pkg/go.mod b/vendor/knative.dev/pkg/go.mod index 6f0d03a3c36..601674ec935 100644 --- a/vendor/knative.dev/pkg/go.mod +++ b/vendor/knative.dev/pkg/go.mod @@ -29,7 +29,7 @@ require ( github.com/prometheus/common v0.9.1 github.com/spf13/pflag v1.0.5 github.com/tsenart/vegeta v12.7.1-0.20190725001342-b5f4fca92137+incompatible - go.opencensus.io v0.22.3 + go.opencensus.io v0.22.4 go.uber.org/multierr v1.5.0 go.uber.org/zap v1.14.1 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e @@ -47,8 +47,7 @@ require ( k8s.io/code-generator v0.18.0 k8s.io/gengo v0.0.0-20200205140755-e0e292d8aa12 k8s.io/klog v1.0.0 - k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29 - knative.dev/test-infra v0.0.0-20200615231324-3a016f44102c + knative.dev/test-infra v0.0.0-20200617235125-6382dba95484 sigs.k8s.io/boskos v0.0.0-20200530174753-71e795271860 ) diff --git a/vendor/knative.dev/pkg/go.sum b/vendor/knative.dev/pkg/go.sum index 7e85df17005..4683234a241 100644 --- a/vendor/knative.dev/pkg/go.sum +++ b/vendor/knative.dev/pkg/go.sum @@ -251,6 +251,7 @@ 
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -898,6 +899,8 @@ go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1401,8 +1404,8 @@ knative.dev/test-infra v0.0.0-20200522180958-6a0a9b9d893a h1:c0qTABRcNoxZVu5gsry knative.dev/test-infra v0.0.0-20200522180958-6a0a9b9d893a/go.mod h1:n9eQkzmSNj8BiqNFl1lzoz68D09uMeJfyOjc132Gbik= knative.dev/test-infra v0.0.0-20200606045118-14ebc4a42974 h1:CrZmlbB+j3ZF/aTrfyypY5ulX2w7XrkfeXKQsbkqzTg= knative.dev/test-infra v0.0.0-20200606045118-14ebc4a42974/go.mod h1://I6IZIF0QDgs5wotU243ZZ5cTpm6/GthayjUenBBc0= -knative.dev/test-infra 
v0.0.0-20200615231324-3a016f44102c h1:pzn7d3gVWX6p10CpdSFAYlgFhLwI6hGQ8H4sxQfvob4= -knative.dev/test-infra v0.0.0-20200615231324-3a016f44102c/go.mod h1:+BfrTJpc++rH30gX/C0QY6NT2eYVzycll52uw6CrQnc= +knative.dev/test-infra v0.0.0-20200617235125-6382dba95484 h1:5D1Fm6aA1T1QQXLb1HkJ5t8gB9pTkhLYak1CCqIP+pE= +knative.dev/test-infra v0.0.0-20200617235125-6382dba95484/go.mod h1:+BfrTJpc++rH30gX/C0QY6NT2eYVzycll52uw6CrQnc= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/vendor/knative.dev/pkg/hack/tools.go b/vendor/knative.dev/pkg/hack/tools.go index f37c085773f..22763c5a8c6 100644 --- a/vendor/knative.dev/pkg/hack/tools.go +++ b/vendor/knative.dev/pkg/hack/tools.go @@ -26,9 +26,7 @@ import ( _ "k8s.io/code-generator/cmd/defaulter-gen" _ "k8s.io/code-generator/cmd/informer-gen" _ "k8s.io/code-generator/cmd/lister-gen" - _ "k8s.io/kube-openapi/cmd/openapi-gen" _ "knative.dev/pkg/codegen/cmd/injection-gen" - _ "knative.dev/test-infra/scripts" ) diff --git a/vendor/knative.dev/pkg/hack/update-codegen.sh b/vendor/knative.dev/pkg/hack/update-codegen.sh index d049c42cbc2..8a219833503 100755 --- a/vendor/knative.dev/pkg/hack/update-codegen.sh +++ b/vendor/knative.dev/pkg/hack/update-codegen.sh @@ -19,6 +19,12 @@ set -o nounset set -o pipefail export GO111MODULE=on +# If we run with -mod=vendor here, then generate-groups.sh looks for vendor files in the wrong place. 
+export GOFLAGS=-mod= + +if [ -z "${GOPATH:-}" ]; then + export GOPATH=$(go env GOPATH) +fi source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh @@ -41,7 +47,7 @@ EXTERNAL_INFORMER_PKG="k8s.io/client-go/informers" \ ${REPO_ROOT_DIR}/hack/generate-knative.sh "injection" \ k8s.io/client-go \ k8s.io/api \ - "admissionregistration:v1beta1 apps:v1 autoscaling:v1,v2beta1 batch:v1,v1beta1 core:v1 rbac:v1" \ + "admissionregistration:v1beta1,v1 apps:v1 autoscaling:v1,v2beta1 batch:v1,v1beta1 core:v1 rbac:v1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \ --force-genreconciler-kinds "Namespace" @@ -50,7 +56,7 @@ VERSIONED_CLIENTSET_PKG="k8s.io/apiextensions-apiserver/pkg/client/clientset/cli ${REPO_ROOT_DIR}/hack/generate-knative.sh "injection" \ k8s.io/apiextensions-apiserver/pkg/client \ k8s.io/apiextensions-apiserver/pkg/apis \ - "apiextensions:v1beta1" \ + "apiextensions:v1beta1,v1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \ --force-genreconciler-kinds "CustomResourceDefinition" diff --git a/vendor/knative.dev/pkg/injection/README.md b/vendor/knative.dev/pkg/injection/README.md index d30b090bc7f..e23ff32f527 100644 --- a/vendor/knative.dev/pkg/injection/README.md +++ b/vendor/knative.dev/pkg/injection/README.md @@ -471,7 +471,7 @@ reconciler.PreProcessReconcile(ctx, resource) reconcileEvent = r.reconciler.ReconcileKind(ctx, resource) -reconciler.PostProcessReconcile(ctx, resource) +reconciler.PostProcessReconcile(ctx, resource, oldResource) ``` #### Stubs diff --git a/vendor/knative.dev/pkg/injection/sharedmain/main.go b/vendor/knative.dev/pkg/injection/sharedmain/main.go index 1410138e1f0..fdc910ef034 100644 --- a/vendor/knative.dev/pkg/injection/sharedmain/main.go +++ b/vendor/knative.dev/pkg/injection/sharedmain/main.go @@ -52,6 +52,7 @@ import ( "knative.dev/pkg/logging" "knative.dev/pkg/metrics" "knative.dev/pkg/profiling" + "knative.dev/pkg/reconciler" "knative.dev/pkg/signals" 
"knative.dev/pkg/system" "knative.dev/pkg/version" @@ -231,6 +232,18 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf CheckK8sClientMinimumVersionOrDie(ctx, logger) cmw := SetupConfigMapWatchOrDie(ctx, logger) + + // Set up leader election config + leaderElectionConfig, err := GetLeaderElectionConfig(ctx) + if err != nil { + logger.Fatalf("Error loading leader election configuration: %v", err) + } + leConfig := leaderElectionConfig.GetComponentConfig(component) + if leConfig.LeaderElect { + // Signal that we are executing in a context with leader election. + ctx = kle.WithStandardLeaderElectorBuilder(ctx, kubeclient.Get(ctx), leConfig) + } + controllers, webhooks := ControllersAndWebhooksFromCtors(ctx, cmw, ctors...) WatchLoggingConfigOrDie(ctx, cmw, logger, atomicLevel, component) WatchObservabilityConfigOrDie(ctx, cmw, profilingHandler, logger, component) @@ -241,7 +254,6 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf // If we have one or more admission controllers, then start the webhook // and pass them in. var wh *webhook.Webhook - var err error if len(webhooks) > 0 { // Register webhook metrics webhook.RegisterMetrics() @@ -399,6 +411,11 @@ func SecretFetcher(ctx context.Context) metrics.SecretFetcher { func ControllersAndWebhooksFromCtors(ctx context.Context, cmw *configmap.InformedWatcher, ctors ...injection.ControllerConstructor) ([]*controller.Impl, []interface{}) { + + // Check whether the context has been infused with a leader elector builder. + // If it has, then every reconciler we plan to start MUST implement LeaderAware. 
+ leEnabled := kle.HasLeaderElection(ctx) + controllers := make([]*controller.Impl, 0, len(ctors)) webhooks := make([]interface{}, 0) for _, cf := range ctors { @@ -410,6 +427,12 @@ func ControllersAndWebhooksFromCtors(ctx context.Context, case webhook.AdmissionController, webhook.ConversionController: webhooks = append(webhooks, c) } + + if leEnabled { + if _, ok := ctrl.Reconciler.(reconciler.LeaderAware); !ok { + log.Fatalf("%T is not leader-aware, all reconcilers must be leader-aware to enable fine-grained leader election.", ctrl.Reconciler) + } + } } return controllers, webhooks diff --git a/vendor/knative.dev/pkg/leaderelection/config.go b/vendor/knative.dev/pkg/leaderelection/config.go index 06f0874e2a4..8b7c0205812 100644 --- a/vendor/knative.dev/pkg/leaderelection/config.go +++ b/vendor/knative.dev/pkg/leaderelection/config.go @@ -30,6 +30,10 @@ import ( const configMapNameEnv = "CONFIG_LEADERELECTION_NAME" +// MaxBuckets is the maximum number of buckets to allow users to define. +// This is a variable so that it may be customized in the binary entrypoint. +var MaxBuckets uint32 = 10 + var validResourceLocks = sets.NewString("leases", "configmaps", "endpoints") // NewConfigFromMap returns a Config for the given map, or an error. @@ -43,6 +47,8 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { cm.AsDuration("renewDeadline", &config.RenewDeadline), cm.AsDuration("retryPeriod", &config.RetryPeriod), + cm.AsUint32("buckets", &config.Buckets), + // enabledComponents are not validated here, because they are dependent on // the component. Components should provide additional validation for this // field. 
@@ -51,6 +57,9 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { return nil, err } + if config.Buckets < 1 || config.Buckets > MaxBuckets { + return nil, fmt.Errorf("buckets: value must be between %d <= %d <= %d", 1, config.Buckets, MaxBuckets) + } if !validResourceLocks.Has(config.ResourceLock) { return nil, fmt.Errorf(`resourceLock: invalid value %q: valid values are "leases","configmaps","endpoints"`, config.ResourceLock) } @@ -72,6 +81,7 @@ func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { // single source repository, viz: serving or eventing. type Config struct { ResourceLock string + Buckets uint32 LeaseDuration time.Duration RenewDeadline time.Duration RetryPeriod time.Duration @@ -83,6 +93,7 @@ func (c *Config) GetComponentConfig(name string) ComponentConfig { return ComponentConfig{ Component: name, LeaderElect: true, + Buckets: c.Buckets, ResourceLock: c.ResourceLock, LeaseDuration: c.LeaseDuration, RenewDeadline: c.RenewDeadline, @@ -96,6 +107,7 @@ func (c *Config) GetComponentConfig(name string) ComponentConfig { func defaultConfig() *Config { return &Config{ ResourceLock: "leases", + Buckets: 1, LeaseDuration: 15 * time.Second, RenewDeadline: 10 * time.Second, RetryPeriod: 2 * time.Second, @@ -107,12 +119,21 @@ func defaultConfig() *Config { type ComponentConfig struct { Component string LeaderElect bool + Buckets uint32 ResourceLock string LeaseDuration time.Duration RenewDeadline time.Duration RetryPeriod time.Duration } +// StatefulSetConfig represents the required information for a StatefulSet service. 
+type StatefulSetConfig struct { + StatefulSetName string + ServiceName string + Port string + Protocol string +} + func defaultComponentConfig(name string) ComponentConfig { return ComponentConfig{ Component: name, diff --git a/vendor/knative.dev/pkg/leaderelection/config_test.go b/vendor/knative.dev/pkg/leaderelection/config_test.go index 3d37677882f..e2850c8a888 100644 --- a/vendor/knative.dev/pkg/leaderelection/config_test.go +++ b/vendor/knative.dev/pkg/leaderelection/config_test.go @@ -18,6 +18,8 @@ package leaderelection import ( "errors" + "fmt" + "strconv" "testing" "time" @@ -30,6 +32,7 @@ import ( func okConfig() *Config { return &Config{ ResourceLock: "leases", + Buckets: 1, LeaseDuration: 15 * time.Second, RenewDeadline: 10 * time.Second, RetryPeriod: 2 * time.Second, @@ -40,6 +43,7 @@ func okConfig() *Config { func okData() map[string]string { return map[string]string{ "resourceLock": "leases", + "buckets": "1", // values in this data come from the defaults suggested in the // code: // https://github.com/kubernetes/client-go/blob/kubernetes-1.16.0/tools/leaderelection/leaderelection.go @@ -72,6 +76,17 @@ func TestNewConfigMapFromData(t *testing.T) { config.EnabledComponents.Insert("controller") return config }(), + }, { + name: "OK config - controller enabled with multiple buckets", + data: kmeta.UnionMaps(okData(), map[string]string{ + "buckets": "5", + }), + expected: func() *Config { + config := okConfig() + config.EnabledComponents.Insert("controller") + config.Buckets = 5 + return config + }(), }, { name: "invalid resourceLock", data: kmeta.UnionMaps(okData(), map[string]string{ @@ -96,6 +111,24 @@ func TestNewConfigMapFromData(t *testing.T) { "retryPeriod": "flops", }), err: errors.New(`failed to parse "retryPeriod": time: invalid duration flops`), + }, { + name: "invalid buckets - not an int", + data: kmeta.UnionMaps(okData(), map[string]string{ + "buckets": "not-an-int", + }), + err: errors.New(`failed to parse "buckets": strconv.ParseUint: 
parsing "not-an-int": invalid syntax`), + }, { + name: "invalid buckets - too small", + data: kmeta.UnionMaps(okData(), map[string]string{ + "buckets": "0", + }), + err: fmt.Errorf("buckets: value must be between 1 <= 0 <= %d", MaxBuckets), + }, { + name: "invalid buckets - too large", + data: kmeta.UnionMaps(okData(), map[string]string{ + "buckets": strconv.Itoa(int(MaxBuckets + 1)), + }), + err: fmt.Errorf(`buckets: value must be between 1 <= %d <= %d`, MaxBuckets+1, MaxBuckets), }} for _, tc := range cases { diff --git a/vendor/knative.dev/pkg/leaderelection/context.go b/vendor/knative.dev/pkg/leaderelection/context.go new file mode 100644 index 00000000000..d273a572e2f --- /dev/null +++ b/vendor/knative.dev/pkg/leaderelection/context.go @@ -0,0 +1,262 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "context" + "fmt" + "hash/fnv" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "knative.dev/pkg/logging" + "knative.dev/pkg/network" + "knative.dev/pkg/reconciler" + "knative.dev/pkg/system" +) + +// WithStandardLeaderElectorBuilder infuses a context with the ability to build +// LeaderElectors with the provided component configuration acquiring resource +// locks via the provided kubernetes client. 
+func WithStandardLeaderElectorBuilder(ctx context.Context, kc kubernetes.Interface, cc ComponentConfig) context.Context { + return context.WithValue(ctx, builderKey{}, &standardBuilder{ + kc: kc, + lec: cc, + }) +} + +// WithStatefulSetLeaderElectorBuilder infuses a context with the ability to build +// Electors which are assigned leadership based on the StatefulSet ordinal from +// the provided component configuration. +func WithStatefulSetLeaderElectorBuilder(ctx context.Context, cc ComponentConfig, ssc StatefulSetConfig) context.Context { + return context.WithValue(ctx, builderKey{}, &statefulSetBuilder{ + lec: cc, + ssc: ssc, + }) +} + +// HasLeaderElection returns whether there is leader election configuration +// associated with the context +func HasLeaderElection(ctx context.Context) bool { + val := ctx.Value(builderKey{}) + return val != nil +} + +// Elector is the interface for running a leader elector. +type Elector interface { + Run(context.Context) +} + +// BuildElector builds a leaderelection.LeaderElector for the named LeaderAware +// reconciler using a builder added to the context via WithStandardLeaderElectorBuilder. 
+func BuildElector(ctx context.Context, la reconciler.LeaderAware, name string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) { + if val := ctx.Value(builderKey{}); val != nil { + switch builder := val.(type) { + case *standardBuilder: + return builder.BuildElector(ctx, la, name, enq) + case *statefulSetBuilder: + return builder.BuildElector(ctx, la, enq) + } + } + + return &unopposedElector{ + la: la, + bkt: reconciler.UniversalBucket(), + enq: enq, + }, nil +} + +type builderKey struct{} + +type standardBuilder struct { + kc kubernetes.Interface + lec ComponentConfig +} + +func (b *standardBuilder) BuildElector(ctx context.Context, la reconciler.LeaderAware, name string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) { + logger := logging.FromContext(ctx) + + id, err := UniqueID() + if err != nil { + return nil, err + } + + buckets := make([]Elector, 0, b.lec.Buckets) + for i := uint32(0); i < b.lec.Buckets; i++ { + bkt := &bucket{ + // The resource name is the lowercase: + // {component}.{workqueue}.{index}-of-{total} + name: strings.ToLower(fmt.Sprintf("%s.%s.%02d-of-%02d", b.lec.Component, name, i, b.lec.Buckets)), + index: i, + total: b.lec.Buckets, + } + + rl, err := resourcelock.New(b.lec.ResourceLock, + system.Namespace(), // use namespace we are running in + bkt.Name(), + b.kc.CoreV1(), + b.kc.CoordinationV1(), + resourcelock.ResourceLockConfig{ + Identity: id, + }) + if err != nil { + return nil, err + } + logger.Infof("%s will run in leader-elected mode with id %q", bkt.Name(), rl.Identity()) + + le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ + Lock: rl, + LeaseDuration: b.lec.LeaseDuration, + RenewDeadline: b.lec.RenewDeadline, + RetryPeriod: b.lec.RetryPeriod, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(context.Context) { + logger.Infof("%q has started leading %q", rl.Identity(), bkt.Name()) + if err := la.Promote(bkt, enq); err != nil { + // 
TODO(mattmoor): We expect this to effectively never happen, + // but if it does, we should support wrapping `le` in an elector + // we can cancel here. + logger.Fatalf("%q failed to Promote: %v", rl.Identity(), err) + } + }, + OnStoppedLeading: func() { + logger.Infof("%q has stopped leading %q", rl.Identity(), bkt.Name()) + la.Demote(bkt) + }, + }, + ReleaseOnCancel: true, + + Name: rl.Identity(), + }) + if err != nil { + return nil, err + } + // TODO: use health check watchdog, knative/pkg#1048 + // if lec.WatchDog != nil { + // lec.WatchDog.SetLeaderElection(le) + // } + buckets = append(buckets, &runUntilCancelled{Elector: le}) + } + return &runAll{les: buckets}, nil +} + +type statefulSetBuilder struct { + lec ComponentConfig + ssc StatefulSetConfig +} + +func (b *statefulSetBuilder) BuildElector(ctx context.Context, la reconciler.LeaderAware, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) { + logger := logging.FromContext(ctx) + + ordinal, err := ControllerOrdinal() + if err != nil { + return nil, err + } + + logger.Infof("%s will run in StatefulSet ordinal assignement mode with ordinal %d", b.lec.Component, ordinal) + + return &unopposedElector{ + bkt: &bucket{ + // The name is the full pod DNS of the owner pod of this bucket. + name: fmt.Sprintf("%s://%s-%d.%s.%s.svc.%s:%s", b.ssc.Protocol, + b.ssc.StatefulSetName, ordinal, b.ssc.ServiceName, + system.Namespace(), network.GetClusterDomainName(), b.ssc.Port), + index: uint32(ordinal), + total: b.lec.Buckets, + }, + la: la, + enq: enq, + }, nil +} + +// unopposedElector promotes when run without needing to be elected. 
+type unopposedElector struct { + bkt reconciler.Bucket + la reconciler.LeaderAware + enq func(reconciler.Bucket, types.NamespacedName) +} + +// Run implements Elector +func (ue *unopposedElector) Run(ctx context.Context) { + ue.la.Promote(ue.bkt, ue.enq) +} + +type runAll struct { + les []Elector +} + +// Run implements Elector +func (ra *runAll) Run(ctx context.Context) { + sg := sync.WaitGroup{} + defer sg.Wait() + + for _, le := range ra.les { + sg.Add(1) + go func(le Elector) { + defer sg.Done() + le.Run(ctx) + }(le) + } +} + +// runUntilCancelled wraps a single-term Elector into one that runs until +// the passed context is cancelled. +type runUntilCancelled struct { + // Elector is a single-term elector as we get from K8s leaderelection package. + Elector +} + +// Run implements Elector +func (ruc *runUntilCancelled) Run(ctx context.Context) { + // Turn the single-term elector into a continuous election cycle. + for { + ruc.Elector.Run(ctx) + select { + case <-ctx.Done(): + return // Run quit because context was cancelled, we are done! + default: + // Context wasn't cancelled, start over. + } + } +} + +type bucket struct { + name string + + // We are bucket {index} of {total} + index uint32 + total uint32 +} + +var _ reconciler.Bucket = (*bucket)(nil) + +// Name implements reconciler.Bucket +func (b *bucket) Name() string { + return b.name +} + +// Has implements reconciler.Bucket +func (b *bucket) Has(nn types.NamespacedName) bool { + h := fnv.New32a() + h.Write([]byte(nn.Namespace + "." + nn.Name)) + ii := h.Sum32() % b.total + return b.index == ii +} diff --git a/vendor/knative.dev/pkg/leaderelection/context_test.go b/vendor/knative.dev/pkg/leaderelection/context_test.go new file mode 100644 index 00000000000..76e75caaeaf --- /dev/null +++ b/vendor/knative.dev/pkg/leaderelection/context_test.go @@ -0,0 +1,215 @@ +// +build !race +// TODO(https://github.com/kubernetes/kubernetes/issues/90952): Remove the above. 
+ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "context" + "os" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + fakekube "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "knative.dev/pkg/reconciler" + _ "knative.dev/pkg/system/testing" +) + +func TestWithBuilder(t *testing.T) { + cc := ComponentConfig{ + Component: "component", + LeaderElect: true, + Buckets: 1, + ResourceLock: "leases", + LeaseDuration: 15 * time.Second, + RenewDeadline: 10 * time.Second, + RetryPeriod: 2 * time.Second, + } + kc := fakekube.NewSimpleClientset() + ctx := context.Background() + + promoted := make(chan struct{}) + demoted := make(chan struct{}) + laf := &reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + close(promoted) + return nil + }, + DemoteFunc: func(bkt reconciler.Bucket) { + close(demoted) + }, + } + enq := func(reconciler.Bucket, types.NamespacedName) {} + + created := make(chan struct{}) + kc.PrependReactor("create", "leases", + func(action ktesting.Action) (bool, runtime.Object, error) { + close(created) + return false, nil, nil + }, + ) + + updated := make(chan struct{}) + kc.PrependReactor("update", "leases", + func(action ktesting.Action) (bool, runtime.Object, error) { + // Only close update once. 
+ select { + case <-updated: + default: + close(updated) + } + return false, nil, nil + }, + ) + + if HasLeaderElection(ctx) { + t.Error("HasLeaderElection() = true, wanted false") + } + if le, err := BuildElector(ctx, laf, "name", enq); err != nil { + t.Errorf("BuildElector() = %v, wanted an unopposedElector", err) + } else if _, ok := le.(*unopposedElector); !ok { + t.Errorf("BuildElector() = %T, wanted an unopposedElector", le) + } + + ctx = WithStandardLeaderElectorBuilder(ctx, kc, cc) + if !HasLeaderElection(ctx) { + t.Error("HasLeaderElection() = false, wanted true") + } + + le, err := BuildElector(ctx, laf, "name", enq) + if err != nil { + t.Fatalf("BuildElector() = %v", err) + } + + // We shouldn't see leases until we Run the elector. + select { + case <-promoted: + t.Error("Got promoted, want no actions.") + case <-demoted: + t.Error("Got demoted, want no actions.") + case <-created: + t.Error("Got created, want no actions.") + case <-updated: + t.Error("Got updated, want no actions.") + default: + } + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + go le.Run(ctx) + + select { + case <-created: + // We expect the lease to be created. + case <-time.After(1 * time.Second): + t.Fatal("Timed out waiting for lease creation.") + } + select { + case <-promoted: + // We expect to have been promoted. + case <-time.After(1 * time.Second): + t.Fatal("Timed out waiting for promotion.") + } + + // Cancelling the context should cause us to give up leadership. + cancel() + + select { + case <-updated: + // We expect the lease to be updated. + case <-time.After(1 * time.Second): + t.Fatal("Timed out waiting for lease update.") + } + select { + case <-demoted: + // We expect to have been demoted. 
+ case <-time.After(1 * time.Second): + t.Fatal("Timed out waiting for demotion.") + } +} + +func TestWithStatefulSetBuilder(t *testing.T) { + cc := ComponentConfig{ + Component: "component", + LeaderElect: true, + Buckets: 1, + } + podDNS := "ws://as-0.autoscaler.knative-testing.svc.cluster.local:8080" + ctx := context.Background() + + promoted := make(chan struct{}) + laf := &reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + close(promoted) + return nil + }, + } + enq := func(reconciler.Bucket, types.NamespacedName) {} + + ctx = WithStatefulSetLeaderElectorBuilder(ctx, cc, StatefulSetConfig{ + ServiceName: "autoscaler", + StatefulSetName: "as", + Protocol: "ws", + Port: "8080", + }) + if !HasLeaderElection(ctx) { + t.Error("HasLeaderElection() = false, wanted true") + } + + le, err := BuildElector(ctx, laf, "name", enq) + if err == nil { + // controller ordinal env not set + t.Error("expected BuildElector() returns error but got none") + } + + os.Setenv(controllerOrdinalEnv, "as-0") + defer os.Unsetenv(controllerOrdinalEnv) + le, err = BuildElector(ctx, laf, "name", enq) + if err != nil { + t.Fatalf("BuildElector() = %v", err) + } + + ule, ok := le.(*unopposedElector) + if !ok { + t.Fatalf("BuildElector() = %T, wanted an unopposedElector", le) + } + if got, want := ule.bkt.Name(), podDNS; got != want { + t.Errorf("bkt.Name() = %s, wanted %s", got, want) + } + + // Shouldn't be promoted until we Run the elector. + select { + case <-promoted: + t.Error("Got promoted, want no actions.") + default: + } + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + go le.Run(ctx) + + select { + case <-promoted: + // We expect to have been promoted. 
+ case <-time.After(1 * time.Second): + t.Fatal("Timed out waiting for promotion.") + } +} diff --git a/vendor/knative.dev/pkg/leaderelection/ordinal.go b/vendor/knative.dev/pkg/leaderelection/ordinal.go new file mode 100644 index 00000000000..94a3775a0dd --- /dev/null +++ b/vendor/knative.dev/pkg/leaderelection/ordinal.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// If running a process on Kubernetes, the value of this environment variable +// should be set to the pod name via the downward API. +const controllerOrdinalEnv = "CONTROLLER_ORDINAL" + +// ControllerOrdinal tries to get the ordinal from the pod name of a StatefulSet, +// which is provided from the environment variable CONTROLLER_ORDINAL. 
+func ControllerOrdinal() (uint64, error) { + v := os.Getenv(controllerOrdinalEnv) + if i := strings.LastIndex(v, "-"); i != -1 { + return strconv.ParseUint(v[i+1:], 10, 64) + } + + return 0, fmt.Errorf("ordinal not found in %s=%s", controllerOrdinalEnv, v) +} diff --git a/vendor/knative.dev/pkg/leaderelection/ordinal_test.go b/vendor/knative.dev/pkg/leaderelection/ordinal_test.go new file mode 100644 index 00000000000..ce300a33bfa --- /dev/null +++ b/vendor/knative.dev/pkg/leaderelection/ordinal_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package leaderelection + +import ( + "fmt" + "os" + "testing" +) + +func TestControllerOrdinal(t *testing.T) { + testCases := []struct { + testname string + podName string + want uint64 + err error + }{{ + testname: "NotSet", + err: fmt.Errorf("ordinal not found in %s=", controllerOrdinalEnv), + }, { + testname: "NoHyphen", + podName: "as", + err: fmt.Errorf("ordinal not found in %s=as", controllerOrdinalEnv), + }, { + testname: "InvalidOrdinal", + podName: "as-invalid", + err: fmt.Errorf(`strconv.ParseUint: parsing "invalid": invalid syntax`), + }, { + testname: "ValidName", + podName: "as-0", + }, { + testname: "ValidName", + podName: "as-1", + want: 1, + }} + + defer os.Unsetenv(controllerOrdinalEnv) + for _, tt := range testCases { + t.Run(tt.testname, func(t *testing.T) { + if tt.podName != "" { + if os.Setenv(controllerOrdinalEnv, tt.podName) != nil { + t.Fatalf("fail to set env var %s=%s", controllerOrdinalEnv, tt.podName) + } + } + + got, gotErr := ControllerOrdinal() + if tt.err != nil { + if gotErr == nil || gotErr.Error() != tt.err.Error() { + t.Errorf("got %v, want = %v, ", gotErr, tt.err) + } + } else if gotErr != nil { + t.Error("ControllerOrdinal() =", gotErr) + } else if got != tt.want { + t.Errorf("ControllerOrdinal() = %d, want = %d", got, tt.want) + } + }) + } +} diff --git a/vendor/knative.dev/pkg/metrics/config.go b/vendor/knative.dev/pkg/metrics/config.go index 5560012559c..d44a22f626a 100644 --- a/vendor/knative.dev/pkg/metrics/config.go +++ b/vendor/knative.dev/pkg/metrics/config.go @@ -37,40 +37,44 @@ import ( type metricsBackend string const ( + // BackendDestinationKey points to the config map entry key for metrics backend destination. + BackendDestinationKey = "metrics.backend-destination" + // DomainEnv points to the metrics domain env var. + DomainEnv = "METRICS_DOMAIN" + // The following keys are used to configure metrics reporting. 
// See https://github.com/knative/serving/blob/master/config/config-observability.yaml // for details. - AllowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics" - BackendDestinationKey = "metrics.backend-destination" - ReportingPeriodKey = "metrics.reporting-period-seconds" - // Stackdriver client configuration keys - StackdriverProjectIDKey = "metrics.stackdriver-project-id" - StackdriverGCPLocationKey = "metrics.stackdriver-gcp-location" - StackdriverClusterNameKey = "metrics.stackdriver-cluster-name" - StackdriverUseSecretKey = "metrics.stackdriver-use-secret" - StackdriverCustomMetricSubDomainKey = "metrics.stackdriver-custom-metrics-subdomain" - - DomainEnv = "METRICS_DOMAIN" + allowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics" + collectorAddressKey = "metrics.opencensus-address" + collectorSecureKey = "metrics.opencensus-require-tls" + reportingPeriodKey = "metrics.reporting-period-seconds" - // Stackdriver is used for Stackdriver backend - Stackdriver metricsBackend = "stackdriver" - // Prometheus is used for Prometheus backend - Prometheus metricsBackend = "prometheus" - // OpenCensus is used to export to the OpenCensus Agent / Collector, - // which can send to many other services. - OpenCensus metricsBackend = "opencensus" - // None is used to export, well, nothing. 
- None metricsBackend = "none" + // Stackdriver client configuration keys + stackdriverClusterNameKey = "metrics.stackdriver-cluster-name" + stackdriverCustomMetricSubDomainKey = "metrics.stackdriver-custom-metrics-subdomain" + stackdriverGCPLocationKey = "metrics.stackdriver-gcp-location" + stackdriverProjectIDKey = "metrics.stackdriver-project-id" + stackdriverUseSecretKey = "metrics.stackdriver-use-secret" defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" - - CollectorAddressKey = "metrics.opencensus-address" - CollectorSecureKey = "metrics.opencensus-require-tls" - - prometheusPortEnvName = "METRICS_PROMETHEUS_PORT" defaultPrometheusPort = 9090 maxPrometheusPort = 65535 minPrometheusPort = 1024 + prometheusPortEnvName = "METRICS_PROMETHEUS_PORT" +) + +// Metrics backend "enum". +const ( + // stackdriver is used for Stackdriver backend + stackdriver metricsBackend = "stackdriver" + // prometheus is used for Prometheus backend + prometheus metricsBackend = "prometheus" + // openCensus is used to export to the OpenCensus Agent / Collector, + // which can send to many other services. + openCensus metricsBackend = "opencensus" + // none is used to export, well, nothing. 
+ none metricsBackend = "none" ) type metricsConfig struct { @@ -142,10 +146,10 @@ type StackdriverClientConfig struct { // NewStackdriverClientConfigFromMap creates a stackdriverClientConfig from the given map func NewStackdriverClientConfigFromMap(config map[string]string) *StackdriverClientConfig { return &StackdriverClientConfig{ - ProjectID: config[StackdriverProjectIDKey], - GCPLocation: config[StackdriverGCPLocationKey], - ClusterName: config[StackdriverClusterNameKey], - UseSecret: strings.EqualFold(config[StackdriverUseSecretKey], "true"), + ProjectID: config[stackdriverProjectIDKey], + GCPLocation: config[stackdriverGCPLocationKey], + ClusterName: config[stackdriverClusterNameKey], + UseSecret: strings.EqualFold(config[stackdriverUseSecretKey], "true"), } } @@ -184,7 +188,7 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri backend := os.Getenv(defaultBackendEnvName) if backend == "" { // Use Prometheus if DEFAULT_METRICS_BACKEND does not exist or is empty - backend = string(Prometheus) + backend = string(prometheus) } // Override backend if it is set in the config map. 
if backendFromConfig, ok := m[BackendDestinationKey]; ok { @@ -192,18 +196,18 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri } lb := metricsBackend(strings.ToLower(backend)) switch lb { - case Stackdriver, Prometheus, OpenCensus: + case stackdriver, prometheus, openCensus: mc.backendDestination = lb default: return nil, fmt.Errorf("unsupported metrics backend value %q", backend) } - if mc.backendDestination == OpenCensus { - mc.collectorAddress = ops.ConfigMap[CollectorAddressKey] - if isSecure := ops.ConfigMap[CollectorSecureKey]; isSecure != "" { + if mc.backendDestination == openCensus { + mc.collectorAddress = ops.ConfigMap[collectorAddressKey] + if isSecure := ops.ConfigMap[collectorSecureKey]; isSecure != "" { var err error if mc.requireSecure, err = strconv.ParseBool(isSecure); err != nil { - return nil, fmt.Errorf("invalid %s value %q", CollectorSecureKey, isSecure) + return nil, fmt.Errorf("invalid %s value %q", collectorSecureKey, isSecure) } if mc.requireSecure { @@ -215,7 +219,7 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri } } - if mc.backendDestination == Prometheus { + if mc.backendDestination == prometheus { pp := ops.PrometheusPort if pp == 0 { var err error @@ -236,7 +240,7 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri // If stackdriverClientConfig is not provided for stackdriver backend destination, OpenCensus will try to // use the application default credentials. If that is not available, Opencensus would fail to create the // metrics exporter. 
- if mc.backendDestination == Stackdriver { + if mc.backendDestination == stackdriver { scc := NewStackdriverClientConfigFromMap(m) mc.stackdriverClientConfig = *scc mc.isStackdriverBackend = true @@ -244,15 +248,15 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri var err error mc.stackdriverMetricTypePrefix = path.Join(mc.domain, mc.component) - customMetricsSubDomain := m[StackdriverCustomMetricSubDomainKey] + customMetricsSubDomain := m[stackdriverCustomMetricSubDomainKey] if customMetricsSubDomain == "" { customMetricsSubDomain = defaultCustomMetricSubDomain } mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, customMetricsSubDomain, mc.component) - if ascmStr := m[AllowStackdriverCustomMetricsKey]; ascmStr != "" { + if ascmStr := m[allowStackdriverCustomMetricsKey]; ascmStr != "" { allowCustomMetrics, err = strconv.ParseBool(ascmStr) if err != nil { - return nil, fmt.Errorf("invalid %s value %q", AllowStackdriverCustomMetricsKey, ascmStr) + return nil, fmt.Errorf("invalid %s value %q", allowStackdriverCustomMetricsKey, ascmStr) } } @@ -297,15 +301,15 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri // For Prometheus, we will use a lower value since the exporter doesn't // push anything but just responds to pull requests, and shorter durations // do not really hurt the performance and we rely on the scraping configuration. 
- if repStr, ok := m[ReportingPeriodKey]; ok && repStr != "" { + if repStr, ok := m[reportingPeriodKey]; ok && repStr != "" { repInt, err := strconv.Atoi(repStr) if err != nil { - return nil, fmt.Errorf("invalid %s value %q", ReportingPeriodKey, repStr) + return nil, fmt.Errorf("invalid %s value %q", reportingPeriodKey, repStr) } mc.reportingPeriod = time.Duration(repInt) * time.Second - } else if mc.backendDestination == Stackdriver { + } else if mc.backendDestination == stackdriver { mc.reportingPeriod = 60 * time.Second - } else if mc.backendDestination == Prometheus { + } else if mc.backendDestination == prometheus { mc.reportingPeriod = 5 * time.Second } diff --git a/vendor/knative.dev/pkg/metrics/config_test.go b/vendor/knative.dev/pkg/metrics/config_test.go index 3831dd66fbd..8f985791818 100644 --- a/vendor/knative.dev/pkg/metrics/config_test.go +++ b/vendor/knative.dev/pkg/metrics/config_test.go @@ -69,7 +69,7 @@ var ( ops: ExporterOptions{ ConfigMap: map[string]string{ BackendDestinationKey: "unsupported", - StackdriverProjectIDKey: testProj, + stackdriverProjectIDKey: testProj, }, Domain: servingDomain, Component: testComponent, @@ -79,7 +79,7 @@ var ( name: "emptyDomain", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), + BackendDestinationKey: string(prometheus), }, Domain: "", Component: testComponent, @@ -89,7 +89,7 @@ var ( name: "invalidComponent", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(OpenCensus), + BackendDestinationKey: string(openCensus), }, Domain: servingDomain, Component: "", @@ -99,40 +99,40 @@ var ( name: "invalidReportingPeriod", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(OpenCensus), - ReportingPeriodKey: "test", + BackendDestinationKey: string(openCensus), + reportingPeriodKey: "test", }, Domain: servingDomain, Component: testComponent, }, - expectedErr: "invalid " + ReportingPeriodKey + ` value "test"`, 
+ expectedErr: "invalid " + reportingPeriodKey + ` value "test"`, }, { name: "invalidOpenCensusSecuritySetting", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(OpenCensus), - CollectorSecureKey: "yep", + BackendDestinationKey: string(openCensus), + collectorSecureKey: "yep", }, Domain: servingDomain, Component: testComponent, }, - expectedErr: "invalid " + CollectorSecureKey + ` value "yep"`, + expectedErr: "invalid " + collectorSecureKey + ` value "yep"`, }, { name: "invalidAllowStackdriverCustomMetrics", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - AllowStackdriverCustomMetricsKey: "test", + BackendDestinationKey: string(stackdriver), + allowStackdriverCustomMetricsKey: "test", }, Domain: servingDomain, Component: testComponent, }, - expectedErr: "invalid " + AllowStackdriverCustomMetricsKey + ` value "test"`, + expectedErr: "invalid " + allowStackdriverCustomMetricsKey + ` value "test"`, }, { name: "tooSmallPrometheusPort", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), + BackendDestinationKey: string(prometheus), }, Domain: servingDomain, Component: testComponent, @@ -143,7 +143,7 @@ var ( name: "tooBigPrometheusPort", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), + BackendDestinationKey: string(prometheus), }, Domain: servingDomain, Component: testComponent, @@ -161,7 +161,7 @@ var ( name: "stackdriverProjectIDMissing", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), + BackendDestinationKey: string(stackdriver), }, Domain: servingDomain, Component: testComponent, @@ -169,7 +169,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, 
stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -186,7 +186,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: defaultPrometheusPort, }, @@ -195,11 +195,11 @@ var ( name: "validStackdriver", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: anotherProj, - StackdriverGCPLocationKey: "us-west1", - StackdriverClusterNameKey: "cluster", - StackdriverUseSecretKey: "true", + BackendDestinationKey: string(stackdriver), + stackdriverProjectIDKey: anotherProj, + stackdriverGCPLocationKey: "us-west1", + stackdriverClusterNameKey: "cluster", + stackdriverUseSecretKey: "true", }, Domain: servingDomain, Component: testComponent, @@ -213,7 +213,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -236,9 +236,9 @@ var ( name: "validPartialStackdriver", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: anotherProj, - StackdriverClusterNameKey: "cluster", + BackendDestinationKey: string(stackdriver), + stackdriverProjectIDKey: anotherProj, + stackdriverClusterNameKey: "cluster", }, Domain: servingDomain, Component: testComponent, @@ -246,7 +246,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -261,9 +261,9 @@ var ( name: "validOpenCensusSettings", ops: ExporterOptions{ 
ConfigMap: map[string]string{ - BackendDestinationKey: string(OpenCensus), - CollectorAddressKey: "external-svc:55678", - CollectorSecureKey: "true", + BackendDestinationKey: string(openCensus), + collectorAddressKey: "external-svc:55678", + collectorSecureKey: "true", }, Domain: servingDomain, Component: testComponent, @@ -280,7 +280,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: OpenCensus, + backendDestination: openCensus, collectorAddress: "external-svc:55678", requireSecure: true, secret: &corev1.Secret{ @@ -298,7 +298,7 @@ var ( name: "validPrometheus", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), + BackendDestinationKey: string(prometheus), }, Domain: servingDomain, Component: testComponent, @@ -306,7 +306,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: defaultPrometheusPort, }, @@ -316,7 +316,7 @@ var ( ops: ExporterOptions{ ConfigMap: map[string]string{ BackendDestinationKey: "Stackdriver", - StackdriverProjectIDKey: testProj, + stackdriverProjectIDKey: testProj, }, Domain: servingDomain, Component: testComponent, @@ -324,7 +324,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -338,8 +338,8 @@ var ( name: "overriddenReportingPeriodPrometheus", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), - ReportingPeriodKey: "12", + BackendDestinationKey: string(prometheus), + reportingPeriodKey: "12", }, Domain: servingDomain, Component: testComponent, @@ -347,7 +347,7 @@ var ( expectedConfig: 
metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 12 * time.Second, prometheusPort: defaultPrometheusPort, }, @@ -356,9 +356,9 @@ var ( name: "overriddenReportingPeriodStackdriver", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: "test2", - ReportingPeriodKey: "7", + BackendDestinationKey: string(stackdriver), + stackdriverProjectIDKey: "test2", + reportingPeriodKey: "7", }, Domain: servingDomain, Component: testComponent, @@ -366,7 +366,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 7 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -380,9 +380,9 @@ var ( name: "overriddenReportingPeriodStackdriver2", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: "test2", - ReportingPeriodKey: "3", + BackendDestinationKey: string(stackdriver), + stackdriverProjectIDKey: "test2", + reportingPeriodKey: "3", }, Domain: servingDomain, Component: testComponent, @@ -390,7 +390,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 3 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -403,8 +403,8 @@ var ( name: "emptyReportingPeriodPrometheus", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), - ReportingPeriodKey: "", + BackendDestinationKey: string(prometheus), + reportingPeriodKey: "", }, Domain: servingDomain, Component: testComponent, @@ -412,7 +412,7 @@ var ( expectedConfig: metricsConfig{ domain: 
servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: defaultPrometheusPort, }, @@ -421,9 +421,9 @@ var ( name: "emptyReportingPeriodStackdriver", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: "test2", - ReportingPeriodKey: "", + BackendDestinationKey: string(stackdriver), + stackdriverProjectIDKey: "test2", + reportingPeriodKey: "", }, Domain: servingDomain, Component: testComponent, @@ -431,7 +431,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -445,10 +445,10 @@ var ( name: "allowStackdriverCustomMetric", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: "test2", - ReportingPeriodKey: "", - AllowStackdriverCustomMetricsKey: "true", + BackendDestinationKey: string(stackdriver), + stackdriverProjectIDKey: "test2", + reportingPeriodKey: "", + allowStackdriverCustomMetricsKey: "true", }, Domain: servingDomain, Component: testComponent, @@ -456,7 +456,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -469,10 +469,10 @@ var ( name: "allowStackdriverCustomMetric with subdomain", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Stackdriver), - StackdriverProjectIDKey: "test2", - ReportingPeriodKey: "", - StackdriverCustomMetricSubDomainKey: customSubDomain, + BackendDestinationKey: 
string(stackdriver), + stackdriverProjectIDKey: "test2", + reportingPeriodKey: "", + stackdriverCustomMetricSubDomainKey: customSubDomain, }, Domain: servingDomain, Component: testComponent, @@ -480,7 +480,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -493,7 +493,7 @@ var ( name: "overridePrometheusPort", ops: ExporterOptions{ ConfigMap: map[string]string{ - BackendDestinationKey: string(Prometheus), + BackendDestinationKey: string(prometheus), }, Domain: servingDomain, Component: testComponent, @@ -502,7 +502,7 @@ var ( expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: 9091, }, @@ -550,7 +550,7 @@ func TestGetMetricsConfig_fromEnv(t *testing.T) { }{{ name: "Stackdriver backend from env, no config", varName: defaultBackendEnvName, - varValue: string(Stackdriver), + varValue: string(stackdriver), ops: ExporterOptions{ ConfigMap: map[string]string{}, Domain: servingDomain, @@ -559,7 +559,7 @@ func TestGetMetricsConfig_fromEnv(t *testing.T) { expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -568,16 +568,16 @@ func TestGetMetricsConfig_fromEnv(t *testing.T) { }, { name: "Stackdriver backend from env, Prometheus backend from config", varName: defaultBackendEnvName, - varValue: string(Stackdriver), + varValue: string(stackdriver), ops: ExporterOptions{ - ConfigMap: map[string]string{BackendDestinationKey: string(Prometheus)}, + ConfigMap: 
map[string]string{BackendDestinationKey: string(prometheus)}, Domain: servingDomain, Component: testComponent, }, expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: defaultPrometheusPort, }, @@ -593,7 +593,7 @@ func TestGetMetricsConfig_fromEnv(t *testing.T) { expectedConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: 9999, }, @@ -680,12 +680,12 @@ func TestIsNewExporterRequired(t *testing.T) { oldConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, }, newConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: testProj, ClusterName: "cluster", @@ -697,7 +697,7 @@ func TestIsNewExporterRequired(t *testing.T) { oldConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -706,7 +706,7 @@ func TestIsNewExporterRequired(t *testing.T) { newConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -729,7 +729,7 @@ func TestIsNewExporterRequired(t *testing.T) { oldConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: 
StackdriverClientConfig{ ProjectID: "proj1", }, @@ -737,7 +737,7 @@ func TestIsNewExporterRequired(t *testing.T) { newConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: "proj2", }, @@ -748,7 +748,7 @@ func TestIsNewExporterRequired(t *testing.T) { oldConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: testProj, ClusterName: "cluster1", @@ -757,7 +757,7 @@ func TestIsNewExporterRequired(t *testing.T) { newConfig: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: testProj, ClusterName: "cluster2", @@ -948,10 +948,10 @@ func TestNewStackdriverConfigFromMap(t *testing.T) { }{{ name: "fullSdConfig", stringMap: map[string]string{ - StackdriverProjectIDKey: "project", - StackdriverGCPLocationKey: "us-west1", - StackdriverClusterNameKey: "cluster", - StackdriverUseSecretKey: "true", + stackdriverProjectIDKey: "project", + stackdriverGCPLocationKey: "us-west1", + stackdriverClusterNameKey: "cluster", + stackdriverUseSecretKey: "true", }, expectedConfig: StackdriverClientConfig{ ProjectID: "project", @@ -966,9 +966,9 @@ func TestNewStackdriverConfigFromMap(t *testing.T) { }, { name: "partialSdConfig", stringMap: map[string]string{ - StackdriverProjectIDKey: "project", - StackdriverGCPLocationKey: "us-west1", - StackdriverClusterNameKey: "cluster", + stackdriverProjectIDKey: "project", + stackdriverGCPLocationKey: "us-west1", + stackdriverClusterNameKey: "cluster", }, expectedConfig: StackdriverClientConfig{ ProjectID: "project", @@ -1001,22 +1001,22 @@ func TestStackdriverRecord(t *testing.T) { }{ "non-stackdriver": { opts: 
map[string]string{ - BackendDestinationKey: string(Prometheus), + BackendDestinationKey: string(prometheus), }, servedCounter: 1, statCounter: 1, }, "stackdriver with custom metrics": { opts: map[string]string{ - BackendDestinationKey: string(Stackdriver), - AllowStackdriverCustomMetricsKey: "true", + BackendDestinationKey: string(stackdriver), + allowStackdriverCustomMetricsKey: "true", }, servedCounter: 1, statCounter: 1, }, "stackdriver no custom metrics": { opts: map[string]string{ - BackendDestinationKey: string(Stackdriver), + BackendDestinationKey: string(stackdriver), }, servedCounter: 1, statCounter: 0, @@ -1045,11 +1045,11 @@ func TestStackdriverRecord(t *testing.T) { {Measure: servedCount, Aggregation: view.Count()}, {Measure: statCount, Aggregation: view.Count()}, } - err = view.Register(v...) + err = RegisterResourceView(v...) if err != nil { t.Errorf("Failed to register %+v in stats backend: %v", v, err) } - defer view.Unregister(v...) + defer UnregisterResourceView(v...) // Try recording each metric and checking the result. Record(ctx, servedCount.M(1)) diff --git a/vendor/knative.dev/pkg/metrics/exporter.go b/vendor/knative.dev/pkg/metrics/exporter.go index 544ab3828c8..4c24793820d 100644 --- a/vendor/knative.dev/pkg/metrics/exporter.go +++ b/vendor/knative.dev/pkg/metrics/exporter.go @@ -196,11 +196,11 @@ func isNewExporterRequired(newConfig *metricsConfig) bool { // If the OpenCensus address has changed, restart the exporter. // TODO(evankanderson): Should we just always restart the opencensus agent? 
- if newConfig.backendDestination == OpenCensus { + if newConfig.backendDestination == openCensus { return newConfig.collectorAddress != cc.collectorAddress || newConfig.requireSecure != cc.requireSecure } - return newConfig.backendDestination == Stackdriver && newConfig.stackdriverClientConfig != cc.stackdriverClientConfig + return newConfig.backendDestination == stackdriver && newConfig.stackdriverClientConfig != cc.stackdriverClientConfig } // newMetricsExporter gets a metrics exporter based on the config. @@ -218,13 +218,13 @@ func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view. var err error var e view.Exporter switch config.backendDestination { - case OpenCensus: + case openCensus: e, err = newOpenCensusExporter(config, logger) - case Stackdriver: + case stackdriver: e, err = newStackdriverExporter(config, logger) - case Prometheus: + case prometheus: e, err = newPrometheusExporter(config, logger) - case None: + case none: e, err = nil, nil default: err = fmt.Errorf("unsupported metrics backend %v", config.backendDestination) diff --git a/vendor/knative.dev/pkg/metrics/exporter_test.go b/vendor/knative.dev/pkg/metrics/exporter_test.go index 2a57bdffc9d..59940882ec1 100644 --- a/vendor/knative.dev/pkg/metrics/exporter_test.go +++ b/vendor/knative.dev/pkg/metrics/exporter_test.go @@ -65,7 +65,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: None, + backendDestination: none, }, expectSuccess: true, }, { @@ -73,7 +73,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: "testProj", }, @@ -84,7 +84,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: "test-component", - backendDestination: Stackdriver, + 
backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: "testProj", }, @@ -93,7 +93,7 @@ func TestMetricsExporter(t *testing.T) { }, { name: "stackdriverConfigOnly", config: &metricsConfig{ - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: "project", GCPLocation: "us-west1", @@ -107,7 +107,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -125,7 +125,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, reportingPeriod: 5 * time.Second, prometheusPort: defaultPrometheusPort, stackdriverClientConfig: StackdriverClientConfig{ @@ -143,7 +143,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -161,7 +161,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -178,7 +178,7 @@ func TestMetricsExporter(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, 
stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -212,7 +212,7 @@ func TestInterlevedExporters(t *testing.T) { _, err := newMetricsExporter(&metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: testProj, }, @@ -225,7 +225,7 @@ func TestInterlevedExporters(t *testing.T) { _, err = newMetricsExporter(&metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, prometheusPort: 9090}, TestLogger(t)) if err != nil { t.Error(err) @@ -235,7 +235,7 @@ func TestInterlevedExporters(t *testing.T) { _, err = newMetricsExporter(&metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: testProj, }, @@ -258,7 +258,7 @@ func TestFlushExporter(t *testing.T) { domain: servingDomain, component: testComponent, reportingPeriod: 1 * time.Minute, - backendDestination: Prometheus, + backendDestination: prometheus, } e, err := newMetricsExporter(c, TestLogger(t)) if err != nil { @@ -273,7 +273,7 @@ func TestFlushExporter(t *testing.T) { c = &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, isStackdriverBackend: true, reportingPeriod: 1 * time.Minute, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), diff --git a/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go b/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go index 7cf8df8cc19..9d74c4019ff 100644 --- a/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go +++ b/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go @@ -17,8 +17,10 @@ limitations under the License. 
package metricstest import ( + "fmt" "reflect" + "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats/view" "knative.dev/pkg/test" ) @@ -56,14 +58,17 @@ func CheckStatsNotReported(t test.T, names ...string) { // reported are tagged with the tags in wantTags and that wantValue matches reported count. func CheckCountData(t test.T, name string, wantTags map[string]string, wantValue int64) { t.Helper() - if row := checkExactlyOneRow(t, name); row != nil { - checkRowTags(t, row, name, wantTags) + row, err := checkExactlyOneRow(t, name) + if err != nil { + t.Error(err) + return + } + checkRowTags(t, row, name, wantTags) - if s, ok := row.Data.(*view.CountData); !ok { - t.Error("want CountData", "metric", name, "got", reflect.TypeOf(row.Data)) - } else if s.Value != wantValue { - t.Error("Wrong value", "metric", name, "value", s.Value, "want", wantValue) - } + if s, ok := row.Data.(*view.CountData); !ok { + t.Error("want CountData", "metric", name, "got", reflect.TypeOf(row.Data)) + } else if s.Value != wantValue { + t.Error("Wrong value", "metric", name, "value", s.Value, "want", wantValue) } } @@ -72,21 +77,24 @@ func CheckCountData(t test.T, name string, wantTags map[string]string, wantValue // It also checks that expectedMin and expectedMax match the minimum and maximum reported values, respectively. 
func CheckDistributionData(t test.T, name string, wantTags map[string]string, expectedCount int64, expectedMin float64, expectedMax float64) { t.Helper() - if row := checkExactlyOneRow(t, name); row != nil { - checkRowTags(t, row, name, wantTags) + row, err := checkExactlyOneRow(t, name) + if err != nil { + t.Error(err) + return + } + checkRowTags(t, row, name, wantTags) - if s, ok := row.Data.(*view.DistributionData); !ok { - t.Error("want DistributionData", "metric", name, "got", reflect.TypeOf(row.Data)) - } else { - if s.Count != expectedCount { - t.Error("reporter count wrong", "metric", name, "got", s.Count, "want", expectedCount) - } - if s.Min != expectedMin { - t.Error("reporter count wrong", "metric", name, "got", s.Min, "want", expectedMin) - } - if s.Max != expectedMax { - t.Error("reporter count wrong", "metric", name, "got", s.Max, "want", expectedMax) - } + if s, ok := row.Data.(*view.DistributionData); !ok { + t.Error("want DistributionData", "metric", name, "got", reflect.TypeOf(row.Data)) + } else { + if s.Count != expectedCount { + t.Error("reporter count wrong", "metric", name, "got", s.Count, "want", expectedCount) + } + if s.Min != expectedMin { + t.Error("reporter count wrong", "metric", name, "got", s.Min, "want", expectedMin) + } + if s.Max != expectedMax { + t.Error("reporter count wrong", "metric", name, "got", s.Max, "want", expectedMax) } } } @@ -95,15 +103,19 @@ func CheckDistributionData(t test.T, name string, wantTags map[string]string, ex // are tagged with the tags in wantTags and that expectedCount number of records were reported. 
func CheckDistributionCount(t test.T, name string, wantTags map[string]string, expectedCount int64) { t.Helper() - if row := checkExactlyOneRow(t, name); row != nil { - checkRowTags(t, row, name, wantTags) + row, err := checkExactlyOneRow(t, name) + if err != nil { + t.Error(err) + return + } + checkRowTags(t, row, name, wantTags) - if s, ok := row.Data.(*view.DistributionData); !ok { - t.Error("want DistributionData", "metric", name, "got", reflect.TypeOf(row.Data)) - } else if s.Count != expectedCount { - t.Error("reporter count wrong", "metric", name, "got", s.Count, "want", expectedCount) - } + if s, ok := row.Data.(*view.DistributionData); !ok { + t.Error("want DistributionData", "metric", name, "got", reflect.TypeOf(row.Data)) + } else if s.Count != expectedCount { + t.Error("reporter count wrong", "metric", name, "got", s.Count, "want", expectedCount) } + } // CheckLastValueData checks the view with a name matching string name to verify that the LastValueData stats @@ -125,14 +137,17 @@ func CheckLastValueData(t test.T, name string, wantTags map[string]string, wantV // reported are tagged with the tags in wantTags and that wantValue matches the reported sum. 
func CheckSumData(t test.T, name string, wantTags map[string]string, wantValue float64) { t.Helper() - if row := checkExactlyOneRow(t, name); row != nil { - checkRowTags(t, row, name, wantTags) + row, err := checkExactlyOneRow(t, name) + if err != nil { + t.Error(err) + return + } + checkRowTags(t, row, name, wantTags) - if s, ok := row.Data.(*view.SumData); !ok { - t.Error("Wrong type", "metric", name, "got", reflect.TypeOf(row.Data), "want", "SumData") - } else if s.Value != wantValue { - t.Error("Wrong sumdata", "metric", name, "got", s.Value, "want", wantValue) - } + if s, ok := row.Data.(*view.SumData); !ok { + t.Error("Wrong type", "metric", name, "got", reflect.TypeOf(row.Data), "want", "SumData") + } else if s.Value != wantValue { + t.Error("Wrong sumdata", "metric", name, "got", s.Value, "want", wantValue) } } @@ -166,19 +181,29 @@ func lastRow(t test.T, name string) *view.Row { return d[len(d)-1] } -func checkExactlyOneRow(t test.T, name string) *view.Row { - t.Helper() - d, err := view.RetrieveData(name) - if err != nil { - t.Error("Reporter.Report() error", "metric", name, "error", err) - return nil +func checkExactlyOneRow(t test.T, name string) (*view.Row, error) { + // view.Meter implements (and is exposed by) metricproducer.GetAll. Since + // this is a test, reach around and cast these to view.Meter. 
+ var retval *view.Row + for _, producer := range metricproducer.GlobalManager().GetAll() { + meter := producer.(view.Meter) + + d, err := meter.RetrieveData(name) + if err != nil || len(d) == 0 { + continue + } + if len(d) > 1 { + return nil, fmt.Errorf("expected 1 row for metric %q got %d", name, len(d)) + } + if retval != nil { + return nil, fmt.Errorf("got 2 rows from different meters: %+v, %+v", *retval, d[0]) + } + retval = d[0] } - if len(d) != 1 { - t.Error("Reporter.Report() wrong length", "metric", name, "got", len(d), "want", 1) - return nil + if retval == nil { + return nil, fmt.Errorf("could not find row for %q", name) } - - return d[0] + return retval, nil } func checkRowTags(t test.T, row *view.Row, name string, wantTags map[string]string) { diff --git a/vendor/knative.dev/pkg/metrics/opencensus_exporter_test.go b/vendor/knative.dev/pkg/metrics/opencensus_exporter_test.go index 40ba726cda6..c7446a113ac 100644 --- a/vendor/knative.dev/pkg/metrics/opencensus_exporter_test.go +++ b/vendor/knative.dev/pkg/metrics/opencensus_exporter_test.go @@ -51,7 +51,7 @@ func TestOpenCensusConfig(t *testing.T) { config: metricsConfig{ domain: "test", component: "test", - backendDestination: OpenCensus, + backendDestination: openCensus, }, wantFunc: func(t *testing.T, v view.Exporter) { if v == nil { @@ -64,7 +64,7 @@ func TestOpenCensusConfig(t *testing.T) { config: metricsConfig{ domain: "secure", component: "test", - backendDestination: OpenCensus, + backendDestination: openCensus, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-opencensus", diff --git a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go index 8c3f462aca4..c8a7b73d9d9 100644 --- a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "contrib.go.opencensus.io/exporter/prometheus" + prom 
"contrib.go.opencensus.io/exporter/prometheus" "go.opencensus.io/stats/view" "go.uber.org/zap" ) @@ -32,7 +32,7 @@ var ( ) func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { - e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component}) + e, err := prom.NewExporter(prom.Options{Namespace: config.component}) if err != nil { logger.Errorw("Failed to create the Prometheus exporter.", zap.Error(err)) return nil, err @@ -61,7 +61,7 @@ func resetCurPromSrv() { } } -func startNewPromSrv(e *prometheus.Exporter, port int) *http.Server { +func startNewPromSrv(e *prom.Exporter, port int) *http.Server { sm := http.NewServeMux() sm.Handle("/metrics", e) curPromSrvMux.Lock() diff --git a/vendor/knative.dev/pkg/metrics/prometheus_exporter_test.go b/vendor/knative.dev/pkg/metrics/prometheus_exporter_test.go index 260e5c18554..51a95cb63dc 100644 --- a/vendor/knative.dev/pkg/metrics/prometheus_exporter_test.go +++ b/vendor/knative.dev/pkg/metrics/prometheus_exporter_test.go @@ -29,7 +29,7 @@ func TestNewPrometheusExporter(t *testing.T) { config: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, prometheusPort: 9090, }, expectedAddr: ":9090", @@ -38,7 +38,7 @@ func TestNewPrometheusExporter(t *testing.T) { config: metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Prometheus, + backendDestination: prometheus, prometheusPort: 9091, }, expectedAddr: ":9091", diff --git a/vendor/knative.dev/pkg/metrics/record.go b/vendor/knative.dev/pkg/metrics/record.go index d6e433a1768..c4627917980 100644 --- a/vendor/knative.dev/pkg/metrics/record.go +++ b/vendor/knative.dev/pkg/metrics/record.go @@ -40,7 +40,7 @@ func RecordBatch(ctx context.Context, mss ...stats.Measurement) { // be used to create a view.Distribution. 
func Buckets125(low, high float64) []float64 { buckets := []float64{low} - for last := low; last < high; last = last * 10 { + for last := low; last < high; last *= 10 { buckets = append(buckets, 2*last, 5*last, 10*last) } return buckets diff --git a/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go index e4c2d08dce1..5974e6bcfab 100644 --- a/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go +++ b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go @@ -21,7 +21,7 @@ import ( "path" "sync" - "contrib.go.opencensus.io/exporter/stackdriver" + sd "contrib.go.opencensus.io/exporter/stackdriver" "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" @@ -60,7 +60,7 @@ var ( // In product usage, this is always set to function newOpencensusSDExporter. // In unit tests this is set to a fake one to avoid calling actual Google API // service. - newStackdriverExporterFunc func(stackdriver.Options) (view.Exporter, error) + newStackdriverExporterFunc func(sd.Options) (view.Exporter, error) // kubeclient is the in-cluster Kubernetes kubeclient, which is lazy-initialized on first use. 
kubeclient *kubernetes.Clientset @@ -100,8 +100,8 @@ func init() { kubeclientInitErr = nil } -func newOpencensusSDExporter(o stackdriver.Options) (view.Exporter, error) { - e, err := stackdriver.NewExporter(o) +func newOpencensusSDExporter(o sd.Options) (view.Exporter, error) { + e, err := sd.NewExporter(o) if err != nil { return nil, err } @@ -123,7 +123,7 @@ func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (v logger.Warnw("Issue configuring Stackdriver exporter client options, no additional client options will be used: ", zap.Error(err)) } // Automatically fall back on Google application default credentials - e, err := newStackdriverExporterFunc(stackdriver.Options{ + e, err := newStackdriverExporterFunc(sd.Options{ ProjectID: gm.project, Location: gm.location, MonitoringClientOptions: co, @@ -131,7 +131,7 @@ func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (v GetMetricPrefix: mpf, ResourceByDescriptor: getResourceByDescriptorFunc(config.stackdriverMetricTypePrefix, gm), ReportingInterval: config.reportingPeriod, - DefaultMonitoringLabels: &stackdriver.Labels{}, + DefaultMonitoringLabels: &sd.Labels{}, }) if err != nil { logger.Errorw("Failed to create the Stackdriver exporter: ", zap.Error(err)) diff --git a/vendor/knative.dev/pkg/metrics/stackdriver_exporter_test.go b/vendor/knative.dev/pkg/metrics/stackdriver_exporter_test.go index e8890104567..13226b2b094 100644 --- a/vendor/knative.dev/pkg/metrics/stackdriver_exporter_test.go +++ b/vendor/knative.dev/pkg/metrics/stackdriver_exporter_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "contrib.go.opencensus.io/exporter/stackdriver" + sd "contrib.go.opencensus.io/exporter/stackdriver" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" corev1 "k8s.io/api/core/v1" @@ -170,7 +170,7 @@ type fakeExporter struct{} func (fe *fakeExporter) ExportView(vd *view.Data) {} func (fe *fakeExporter) Flush() {} -func newFakeExporter(o 
stackdriver.Options) (view.Exporter, error) { +func newFakeExporter(o sd.Options) (view.Exporter, error) { return &fakeExporter{}, nil } @@ -357,7 +357,7 @@ func TestNewStackdriverExporterWithMetadata(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: "autoscaler", - backendDestination: Stackdriver, + backendDestination: stackdriver, stackdriverClientConfig: StackdriverClientConfig{ ProjectID: testProj, }, @@ -379,7 +379,7 @@ func TestNewStackdriverExporterWithMetadata(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -397,7 +397,7 @@ func TestNewStackdriverExporterWithMetadata(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -415,7 +415,7 @@ func TestNewStackdriverExporterWithMetadata(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), @@ -432,7 +432,7 @@ func TestNewStackdriverExporterWithMetadata(t *testing.T) { config: &metricsConfig{ domain: servingDomain, component: testComponent, - backendDestination: Stackdriver, + backendDestination: stackdriver, reportingPeriod: 60 * time.Second, isStackdriverBackend: true, stackdriverMetricTypePrefix: path.Join(servingDomain, testComponent), diff --git a/vendor/knative.dev/pkg/metrics/testing.go b/vendor/knative.dev/pkg/metrics/testing.go index d33668eadf5..eb048f9cbaa 100644 --- 
a/vendor/knative.dev/pkg/metrics/testing.go +++ b/vendor/knative.dev/pkg/metrics/testing.go @@ -19,7 +19,7 @@ package metrics // InitForTesting initialize the necessary global variables for unit tests. func InitForTesting() { setCurMetricsConfig(&metricsConfig{ - backendDestination: Prometheus, + backendDestination: prometheus, component: "test", domain: "test", }) diff --git a/vendor/knative.dev/pkg/network/transports.go b/vendor/knative.dev/pkg/network/transports.go index 745e6a5a861..dcb07a1750c 100644 --- a/vendor/knative.dev/pkg/network/transports.go +++ b/vendor/knative.dev/pkg/network/transports.go @@ -96,15 +96,15 @@ func newHTTPTransport(connTimeout time.Duration, disableKeepAlives bool) http.Ro return &http.Transport{ // Those match net/http/transport.go Proxy: http.ProxyFromEnvironment, - MaxIdleConns: 1000, - MaxIdleConnsPerHost: 100, - IdleConnTimeout: 5 * time.Second, + IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, DisableKeepAlives: disableKeepAlives, - // This is bespoke. - DialContext: DialWithBackOff, + // Those are bespoke. 
+ DialContext: DialWithBackOff, + MaxIdleConns: 1000, + MaxIdleConnsPerHost: 100, } } diff --git a/vendor/knative.dev/pkg/pool/OWNERS b/vendor/knative.dev/pkg/pool/OWNERS index f4a5f11dcfd..5c0a9d03544 100644 --- a/vendor/knative.dev/pkg/pool/OWNERS +++ b/vendor/knative.dev/pkg/pool/OWNERS @@ -3,5 +3,8 @@ approvers: - controller-approvers +reviewers: +- controller-reviewers + labels: - area/API diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS index afa22257a26..0b270d53af0 100644 --- a/vendor/knative.dev/pkg/reconciler/OWNERS +++ b/vendor/knative.dev/pkg/reconciler/OWNERS @@ -2,3 +2,6 @@ approvers: - controller-approvers + +reviewers: +- controller-reviewers diff --git a/vendor/knative.dev/pkg/reconciler/leader.go b/vendor/knative.dev/pkg/reconciler/leader.go new file mode 100644 index 00000000000..6fece8526df --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/leader.go @@ -0,0 +1,117 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "sync" + + "k8s.io/apimachinery/pkg/types" +) + +// Bucket is an opaque type used to scope leadership. +type Bucket interface { + // Name returns a string representing this bucket, which uniquely + // identifies the bucket and is suitable for use as a resource lock name. + Name() string + + // Has determines whether this Bucket contains a particular key. 
+ Has(key types.NamespacedName) bool +} + +// UniversalBucket returns a Bucket that "Has()" all keys. +func UniversalBucket() Bucket { + return &bucket{} +} + +type bucket struct{} + +var _ Bucket = (*bucket)(nil) + +// Name implements Bucket +func (b *bucket) Name() string { + return "" +} + +// Has implements Bucket +func (b *bucket) Has(nn types.NamespacedName) bool { + return true +} + +// LeaderAware is implemented by Reconcilers that are aware of their leader status. +type LeaderAware interface { + // Promote is called when we become the leader of a given Bucket. It must be + // supplied with an enqueue function through which a Bucket resync may be triggered. + Promote(b Bucket, enq func(Bucket, types.NamespacedName)) error + + // Demote is called when we stop being the leader for the specified Bucket. + Demote(Bucket) +} + +// LeaderAwareFuncs implements LeaderAware using the given functions for handling +// promotion and demotion. +type LeaderAwareFuncs struct { + sync.RWMutex + buckets map[string]Bucket + + PromoteFunc func(b Bucket, enq func(Bucket, types.NamespacedName)) error + DemoteFunc func(b Bucket) +} + +var _ LeaderAware = (*LeaderAwareFuncs)(nil) + +// IsLeaderFor implements LeaderAware +func (laf *LeaderAwareFuncs) IsLeaderFor(key types.NamespacedName) bool { + laf.RLock() + defer laf.RUnlock() + + for _, bkt := range laf.buckets { + if bkt.Has(key) { + return true + } + } + return false +} + +// Promote implements LeaderAware +func (laf *LeaderAwareFuncs) Promote(b Bucket, enq func(Bucket, types.NamespacedName)) error { + func() { + laf.Lock() + defer laf.Unlock() + if laf.buckets == nil { + laf.buckets = make(map[string]Bucket, 1) + } + laf.buckets[b.Name()] = b + }() + + if promote := laf.PromoteFunc; promote != nil { + return promote(b, enq) + } + return nil +} + +// Demote implements LeaderAware +func (laf *LeaderAwareFuncs) Demote(b Bucket) { + func() { + laf.Lock() + defer laf.Unlock() + delete(laf.buckets, b.Name()) + }() + + if demote 
:= laf.DemoteFunc; demote != nil { + demote(b) + } +} diff --git a/vendor/knative.dev/pkg/reconciler/leader_test.go b/vendor/knative.dev/pkg/reconciler/leader_test.go new file mode 100644 index 00000000000..cb57b8c7c62 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/leader_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/types" +) + +func TestLeaderAwareFuncs(t *testing.T) { + laf := LeaderAwareFuncs{} + wantBkt := UniversalBucket() + wantKey := types.NamespacedName{ + Namespace: "foo", + Name: "bar", + } + called := false + wantFunc := func(gotBkt Bucket, gotKey types.NamespacedName) { + called = true + if !cmp.Equal(gotKey, wantKey) { + t.Errorf("key (-want, +got) = %s", cmp.Diff(wantKey, gotKey)) + } + if !cmp.Equal(gotBkt, wantBkt) { + t.Errorf("bucket (-want, +got) = %s", cmp.Diff(wantBkt, gotBkt)) + } + } + + laf.PromoteFunc = func(bkt Bucket, gotFunc func(Bucket, types.NamespacedName)) error { + gotFunc(bkt, wantKey) + if !called { + t.Error("gotFunc didn't call wantFunc!") + } + + // IsLeaderFor takes the bucket's lock, so make sure that the callback + // we provide is not called while the lock is still held by calling a + // function that we know takes the lock. 
+ if !laf.IsLeaderFor(wantKey) { + t.Error("IsLeaderFor() = false, wanted true") + } + return nil + } + laf.DemoteFunc = func(bkt Bucket) { + // Check that we're not called while the lock is held, + // and that we are no longer leader. + if laf.IsLeaderFor(wantKey) { + t.Error("IsLeaderFor() = true, wanted false") + } + } + + // We don't start as leader. + if laf.IsLeaderFor(wantKey) { + t.Error("IsLeaderFor() = true, wanted false") + } + + // After Promote we are leader. + laf.Promote(wantBkt, wantFunc) + if !laf.IsLeaderFor(wantKey) { + t.Error("IsLeaderFor() = false, wanted true") + } + + // After Demote we are not leader. + laf.Demote(wantBkt) + if laf.IsLeaderFor(wantKey) { + t.Error("IsLeaderFor() = true, wanted false") + } +} diff --git a/vendor/knative.dev/pkg/reconciler/reconcile_common.go b/vendor/knative.dev/pkg/reconciler/reconcile_common.go index 6a32c06f7e7..a807edfb688 100644 --- a/vendor/knative.dev/pkg/reconciler/reconcile_common.go +++ b/vendor/knative.dev/pkg/reconciler/reconcile_common.go @@ -18,7 +18,12 @@ package reconciler import ( "context" + "reflect" + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" "knative.dev/pkg/logging" ) @@ -29,30 +34,60 @@ const failedGenerationBump = "NewObservedGenFailure" func PreProcessReconcile(ctx context.Context, resource duckv1.KRShaped) { newStatus := resource.GetStatus() - if newStatus.ObservedGeneration != resource.GetGeneration() { - condSet := resource.GetConditionSet() - manager := condSet.Manage(newStatus) - // Ensure conditions are initialized before we modify. - manager.InitializeConditions() + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. 
+ if d, ok := resource.(apis.Defaultable); ok { + d.SetDefaults(ctx) + } + // Ensure conditions are initialized before we modify. + condSet := resource.GetConditionSet() + manager := condSet.Manage(newStatus) + manager.InitializeConditions() + + if newStatus.ObservedGeneration != resource.GetGeneration() { // Reset Ready/Successful to unknown. The reconciler is expected to overwrite this. manager.MarkUnknown(condSet.GetTopLevelConditionType(), failedGenerationBump, "unsuccessfully observed a new generation") } } // PostProcessReconcile contains logic to apply after reconciliation of a resource. -func PostProcessReconcile(ctx context.Context, resource duckv1.KRShaped) { +func PostProcessReconcile(ctx context.Context, resource, oldResource duckv1.KRShaped) { logger := logging.FromContext(ctx) - newStatus := resource.GetStatus() - mgr := resource.GetConditionSet().Manage(newStatus) + status := resource.GetStatus() + mgr := resource.GetConditionSet().Manage(status) // Bump observed generation to denote that we have processed this // generation regardless of success or failure. - newStatus.ObservedGeneration = resource.GetGeneration() + status.ObservedGeneration = resource.GetGeneration() if rc := mgr.GetTopLevelCondition(); rc == nil { logger.Warn("A reconciliation included no top-level condition") } else if rc.Reason == failedGenerationBump { logger.Warn("A reconciler observed a new generation without updating the resource status") } + + groomConditionsTransitionTime(resource, oldResource) +} + +// groomConditionsTransitionTime ensures that the LastTransitionTime only advances for resources +// where the condition has changed during reconciliation. This also ensures that all advanced +// conditions share the same timestamp. 
+func groomConditionsTransitionTime(resource, oldResource duckv1.KRShaped) { + now := apis.VolatileTime{Inner: metav1.NewTime(time.Now())} + sts := resource.GetStatus() + for i := range sts.Conditions { + cond := &sts.Conditions[i] + + if oldCond := oldResource.GetStatus().GetCondition(cond.Type); oldCond != nil { + cond.LastTransitionTime = oldCond.LastTransitionTime + if reflect.DeepEqual(cond, oldCond) { + continue + } + } + + cond.LastTransitionTime = now + } } diff --git a/vendor/knative.dev/pkg/reconciler/reconcile_common_test.go b/vendor/knative.dev/pkg/reconciler/reconcile_common_test.go index a1858fb2fdb..bda8540ac09 100644 --- a/vendor/knative.dev/pkg/reconciler/reconcile_common_test.go +++ b/vendor/knative.dev/pkg/reconciler/reconcile_common_test.go @@ -19,6 +19,7 @@ package reconciler import ( "context" "testing" + "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +35,10 @@ type TestResource struct { Status duckv1.Status `json:"status"` } +func (t *TestResource) SetDefaults(context.Context) { + t.Annotations = map[string]string{"default": "was set"} +} + func (t *TestResource) GetStatus() *duckv1.Status { return &t.Status } @@ -72,6 +77,10 @@ func TestPreProcess(t *testing.T) { PreProcessReconcile(context.Background(), krShape) + if resource.Annotations["default"] != "was set" { + t.Errorf("Expected default annotations set got=%v", resource.Annotations) + } + if rc := resource.Status.GetCondition("Ready"); rc.Status != "Unknown" { t.Errorf("Expected unchanged ready status got=%s want=Unknown", rc.Status) } @@ -91,13 +100,43 @@ func TestPostProcessReconcileBumpsGeneration(t *testing.T) { resource := makeResource() krShape := duckv1.KRShaped(resource) - PostProcessReconcile(context.Background(), krShape) + PostProcessReconcile(context.Background(), krShape, krShape) if resource.Status.ObservedGeneration != resource.Generation { - t.Errorf("Expected observed generation bump got=%d want=%d", 
resource.Status.ObservedGeneration, resource.Generation) + t.Errorf("Expected observed generation bump got=%d want=%d", + resource.Status.ObservedGeneration, resource.Generation) } if krShape.GetStatus().ObservedGeneration != krShape.GetGeneration() { - t.Errorf("Expected observed generation bump got=%d want=%d", resource.Status.ObservedGeneration, resource.Generation) + t.Errorf("Expected observed generation bump got=%d want=%d", + resource.Status.ObservedGeneration, resource.Generation) + } +} + +func TestPostProcessReconcileUpdatesTransitionTimes(t *testing.T) { + oldNow := apis.VolatileTime{Inner: metav1.NewTime(time.Now())} + resource := makeResource() + oldResource := makeResource() + // initialize old conditions with oldNow + oldResource.Status.Conditions[0].LastTransitionTime = oldNow + oldResource.Status.Conditions[1].LastTransitionTime = oldNow + // change the second condition, but keep the old timestamp. + resource.Status.Conditions[1].LastTransitionTime = oldNow + resource.Status.Conditions[1].Status = corev1.ConditionFalse + + new := duckv1.KRShaped(resource) + old := duckv1.KRShaped(oldResource) + PostProcessReconcile(context.Background(), new, old) + + unchangedCond := resource.Status.Conditions[0] + if unchangedCond.LastTransitionTime != oldNow { + t.Errorf("Expected unchanged condition to keep old timestamp. Got=%v Want=%v", + unchangedCond.LastTransitionTime, oldNow) + } + + changedCond := resource.Status.Conditions[1] + if changedCond.LastTransitionTime == oldNow { + t.Errorf("Expected changed condition to get a new timestamp. 
Got=%v Want=%v", + changedCond.LastTransitionTime, oldNow) } } diff --git a/vendor/knative.dev/pkg/reconciler/testing/hooks_test.go b/vendor/knative.dev/pkg/reconciler/testing/hooks_test.go index 0fc718f48a6..a6d2979276a 100644 --- a/vendor/knative.dev/pkg/reconciler/testing/hooks_test.go +++ b/vendor/knative.dev/pkg/reconciler/testing/hooks_test.go @@ -134,7 +134,7 @@ func TestMultiUpdate(t *testing.T) { updates := 0 h.OnUpdate(&f.Fake, "pods", func(obj runtime.Object) HookResult { - updates = updates + 1 + updates++ switch updates { case 1: case 2: diff --git a/vendor/knative.dev/pkg/test/gcs/gcs.go b/vendor/knative.dev/pkg/test/gcs/gcs.go index b2798ac6a50..13ae206fa64 100644 --- a/vendor/knative.dev/pkg/test/gcs/gcs.go +++ b/vendor/knative.dev/pkg/test/gcs/gcs.go @@ -146,9 +146,17 @@ func (g *GCSClient) getObjectsAttrs(ctx context.Context, bucketName, storagePath return allAttrs, nil } +func (g *GCSClient) listChildren(ctx context.Context, bucketName, dirPath, exclusionFilter string) ([]string, error) { + if dirPath != "" { + dirPath = strings.TrimRight(dirPath, " /") + "/" + } + + return g.list(ctx, bucketName, dirPath, exclusionFilter) +} + // ListChildrenFiles recursively lists all children files. func (g *GCSClient) ListChildrenFiles(ctx context.Context, bucketName, dirPath string) ([]string, error) { - return g.list(ctx, bucketName, strings.TrimRight(dirPath, " /")+"/", "") + return g.listChildren(ctx, bucketName, dirPath, "") } // ListDirectChildren lists direct children paths (including files and directories). @@ -156,7 +164,7 @@ func (g *GCSClient) ListDirectChildren(ctx context.Context, bucketName, dirPath // If there are 2 directories named "foo" and "foobar", // then given storagePath "foo" will get files both under "foo" and "foobar". // Add trailling slash to storagePath, so that only gets children under given directory. 
- return g.list(ctx, bucketName, strings.TrimRight(dirPath, " /")+"/", "/") + return g.listChildren(ctx, bucketName, dirPath, "/") } // CopyObject copies objects from one location to another. Assumes both source and destination buckets exist. diff --git a/vendor/knative.dev/pkg/test/gcs/mock/mock.go b/vendor/knative.dev/pkg/test/gcs/mock/mock.go index 44b25e3ec70..801b0f1e320 100644 --- a/vendor/knative.dev/pkg/test/gcs/mock/mock.go +++ b/vendor/knative.dev/pkg/test/gcs/mock/mock.go @@ -95,7 +95,7 @@ func (c *clientMocker) getError(funcName Method) (bool, error) { delete(c.err, funcName) return true, val.Err } - val.NumCall = val.NumCall - 1 + val.NumCall-- } return false, nil } diff --git a/vendor/knative.dev/pkg/test/ghutil/fakeghutil/fakeghutil.go b/vendor/knative.dev/pkg/test/ghutil/fakeghutil/fakeghutil.go index 7ac563580d8..51d8e2bd939 100644 --- a/vendor/knative.dev/pkg/test/ghutil/fakeghutil/fakeghutil.go +++ b/vendor/knative.dev/pkg/test/ghutil/fakeghutil/fakeghutil.go @@ -333,7 +333,7 @@ func (fgc *FakeGithubClient) CreatePullRequest(org, repo, head, base, title, bod // ListBranches lists branchs for given repo func (fgc *FakeGithubClient) ListBranches(org, repo string) ([]*github.Branch, error) { - var branches []*github.Branch + branches := make([]*github.Branch, 0, len(fgc.Branches)) for _, b := range fgc.Branches { branches = append(branches, b...) 
} diff --git a/vendor/knative.dev/pkg/test/interactive/docker_test.go b/vendor/knative.dev/pkg/test/interactive/docker_test.go index 29dc297c8cd..b43f4413fcc 100644 --- a/vendor/knative.dev/pkg/test/interactive/docker_test.go +++ b/vendor/knative.dev/pkg/test/interactive/docker_test.go @@ -40,7 +40,7 @@ func TestEnv(t *testing.T) { if !exists { break } - badName = badName + "z" + badName += "z" } err = e.PromoteFromEnv(badName) if err == nil { diff --git a/vendor/knative.dev/pkg/test/presubmit-tests.sh b/vendor/knative.dev/pkg/test/presubmit-tests.sh index 261599f7168..af8199dd008 100755 --- a/vendor/knative.dev/pkg/test/presubmit-tests.sh +++ b/vendor/knative.dev/pkg/test/presubmit-tests.sh @@ -37,4 +37,16 @@ function pre_build_tests() { return 0 } +# Run the unit tests with an additional flag '-mod=vendor' to avoid +# downloading the deps in unit tests CI job +function unit_tests() { + # Run the default way. + default_unit_test_runner || failed=1 + + # Run unit testing select packages without race detection, + # so that they may use: // +build !race + report_go_test ./leaderelection || failed=1 + +} + main $@ diff --git a/vendor/knative.dev/pkg/test/spoof/spoof.go b/vendor/knative.dev/pkg/test/spoof/spoof.go index c6e1ffae770..c3a0f7b8779 100644 --- a/vendor/knative.dev/pkg/test/spoof/spoof.go +++ b/vendor/knative.dev/pkg/test/spoof/spoof.go @@ -34,9 +34,9 @@ import ( "knative.dev/pkg/test/ingress" "knative.dev/pkg/test/logging" "knative.dev/pkg/test/zipkin" + "knative.dev/pkg/tracing/propagation/tracecontextb3" "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" ) @@ -135,7 +135,7 @@ func New( // Enable Zipkin tracing roundTripper := &ochttp.Transport{ Base: transport, - Propagation: &b3.HTTPFormat{}, + Propagation: tracecontextb3.TraceContextEgress, } sc := SpoofingClient{ @@ -223,7 +223,7 @@ func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker, error for _, checker := range 
errorRetryCheckers { retry, newErr := checker(err) if retry { - sc.Logf("Retrying %s: %v", req.URL, newErr) + sc.Logf("Retrying %s: %v", req.URL.String(), newErr) return false, nil } } diff --git a/vendor/knative.dev/pkg/test/webhook-apicoverage/view/html_template.go b/vendor/knative.dev/pkg/test/webhook-apicoverage/view/html_template.go index 89084c8fe1e..15539598deb 100644 --- a/vendor/knative.dev/pkg/test/webhook-apicoverage/view/html_template.go +++ b/vendor/knative.dev/pkg/test/webhook-apicoverage/view/html_template.go @@ -16,11 +16,7 @@ limitations under the License. package view -import ( - "fmt" -) - -var TypeCoverageTempl = fmt.Sprint(` +var TypeCoverageTempl = `